changeset 2:735a0e24575c

Organized folders: added tests, apps, matlab, docs folders. Added __init__.py files
author nikcleju
date Fri, 21 Oct 2011 13:53:49 +0000
parents 2a2abf5092f8
children 537f7798e186
files BP/cgsolve.m BP/l1qc.py BP/l1qc_logbarrier.m BP/l1qc_newton.m OMP/greed_omp_qr.m OMP/omp_QR.py OMP/omp_app.py OMP/omp_sk_bugfix.py OMP/omp_test.py apps/omp_app.py matlab/BP/cgsolve.m matlab/BP/l1qc_logbarrier.m matlab/BP/l1qc_newton.m matlab/OMP/greed_omp_qr.m pyCSalgos/BP/__init__.py pyCSalgos/BP/l1qc.py pyCSalgos/OMP/__init__.py pyCSalgos/OMP/omp_QR.py pyCSalgos/OMP/omp_sk_bugfix.py pyCSalgos/__init__.py tests/l1qc_test.py tests/omp_test.py
diffstat 19 files changed, 3243 insertions(+), 3236 deletions(-)
--- a/BP/cgsolve.m	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,76 +0,0 @@
-% cgsolve.m
-%
-% Solve a symmetric positive definite system Ax = b via conjugate gradients.
-%
-% Usage: [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
-%
-% A - Either an NxN matrix, or a function handle.
-%
-% b - N vector
-%
-% tol - Desired precision.  Algorithm terminates when 
-%    norm(Ax-b)/norm(b) < tol .
-%
-% maxiter - Maximum number of iterations.
-%
-% verbose - If 0, do not print out progress messages.
-%    If an integer greater than 0, print out progress every 'verbose' iters.
-%
-% Written by: Justin Romberg, Caltech
-% Email: jrom@acm.caltech.edu
-% Created: October 2005
-%
-
-function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
-
-if (nargin < 5), verbose = 1; end
-
-implicit = isa(A,'function_handle');
-
-x = zeros(length(b),1);
-r = b;
-d = r;
-delta = r'*r;
-delta0 = b'*b;
-numiter = 0;
-bestx = x;
-bestres = sqrt(delta/delta0); 
-while ((numiter < maxiter) && (delta > tol^2*delta0))
-
-  % q = A*d
-  if (implicit), q = A(d);  else  q = A*d;  end
- 
-  alpha = delta/(d'*q);
-  x = x + alpha*d;
-  
-  if (mod(numiter+1,50) == 0)
-    % r = b - Aux*x
-    if (implicit), r = b - A(x);  else  r = b - A*x;  end
-  else
-    r = r - alpha*q;
-  end
-  
-  deltaold = delta;
-  delta = r'*r;
-  beta = delta/deltaold;
-  d = r + beta*d;
-  numiter = numiter + 1;
-  if (sqrt(delta/delta0) < bestres)
-    bestx = x;
-    bestres = sqrt(delta/delta0);
-  end    
-  
-  if ((verbose) && (mod(numiter,verbose)==0))
-    disp(sprintf('cg: Iter = %d, Best residual = %8.3e, Current residual = %8.3e', ...
-      numiter, bestres, sqrt(delta/delta0)));
-  end
-  
-end
-
-if (verbose)
-  disp(sprintf('cg: Iterations = %d, best residual = %14.8e', numiter, bestres));
-end
-x = bestx;
-res = bestres;
-iter = numiter;
-
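A minimal usage sketch for cgsolve, written against the Python translation in BP/l1qc.py below; the small random symmetric positive definite test system is an assumed example, not taken from the package:

import numpy as np
from l1qc import cgsolve   # assumes BP/l1qc.py is importable as l1qc

np.random.seed(0)
B = np.random.randn(20, 20)
M = np.dot(B.T, B) + 20*np.eye(20)    # symmetric positive definite by construction
b = np.random.randn(20)

x, res, iters = cgsolve(M, b, 1e-8, 200, verbose=0)
# the relative residual should be at or below the requested tolerance
print np.linalg.norm(np.dot(M, x) - b) / np.linalg.norm(b)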
--- a/BP/l1qc.py	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,705 +0,0 @@
-
-import numpy as np
-import scipy.linalg
-import math
-
-
-#function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
-def cgsolve(A, b, tol, maxiter, verbose=1):
-    # Solve a symmetric positive definite system Ax = b via conjugate gradients.
-    #
-    # Usage: [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
-    #
-    # A - Either an NxN matrix, or a function handle.
-    #
-    # b - N vector
-    #
-    # tol - Desired precision.  Algorithm terminates when 
-    #    norm(Ax-b)/norm(b) < tol .
-    #
-    # maxiter - Maximum number of iterations.
-    #
-    # verbose - If 0, do not print out progress messages.
-    #    If an integer greater than 0, print out progress every 'verbose' iters.
-    #
-    # Written by: Justin Romberg, Caltech
-    # Email: jrom@acm.caltech.edu
-    # Created: October 2005
-    #
-
-    #---------------------
-    # Original Matlab code:
-    #        
-    #if (nargin < 5), verbose = 1; end
-    #
-    #implicit = isa(A,'function_handle');
-    #
-    #x = zeros(length(b),1);
-    #r = b;
-    #d = r;
-    #delta = r'*r;
-    #delta0 = b'*b;
-    #numiter = 0;
-    #bestx = x;
-    #bestres = sqrt(delta/delta0); 
-    #while ((numiter < maxiter) && (delta > tol^2*delta0))
-    #
-    #  # q = A*d
-    #  if (implicit), q = A(d);  else  q = A*d;  end
-    # 
-    #  alpha = delta/(d'*q);
-    #  x = x + alpha*d;
-    #  
-    #  if (mod(numiter+1,50) == 0)
-    #    # r = b - Aux*x
-    #    if (implicit), r = b - A(x);  else  r = b - A*x;  end
-    #  else
-    #    r = r - alpha*q;
-    #  end
-    #  
-    #  deltaold = delta;
-    #  delta = r'*r;
-    #  beta = delta/deltaold;
-    #  d = r + beta*d;
-    #  numiter = numiter + 1;
-    #  if (sqrt(delta/delta0) < bestres)
-    #    bestx = x;
-    #    bestres = sqrt(delta/delta0);
-    #  end    
-    #  
-    #  if ((verbose) && (mod(numiter,verbose)==0))
-    #    disp(sprintf('cg: Iter = #d, Best residual = #8.3e, Current residual = #8.3e', ...
-    #      numiter, bestres, sqrt(delta/delta0)));
-    #  end
-    #  
-    #end
-    #
-    #if (verbose)
-    #  disp(sprintf('cg: Iterations = #d, best residual = #14.8e', numiter, bestres));
-    #end
-    #x = bestx;
-    #res = bestres;
-    #iter = numiter;
-
-    # End of original Matlab code
-    #----------------------------
-    
-    #if (nargin < 5), verbose = 1; end
-    # Optional argument
-    
-    #implicit = isa(A,'function_handle');
-    if hasattr(A, '__call__'):
-        implicit = True
-    else:
-        implicit = False
-    
-    x = np.zeros(b.size)
-    r = b.copy()
-    d = r.copy()
-    delta = np.vdot(r,r)
-    delta0 = np.vdot(b,b)
-    numiter = 0
-    bestx = x.copy()
-    bestres = math.sqrt(delta/delta0)
-    while (numiter < maxiter) and (delta > tol**2*delta0):
-    
-      # q = A*d
-      #if (implicit), q = A(d);  else  q = A*d;  end
-      if implicit:
-          q = A(d)
-      else:
-          q = np.dot(A,d)
-     
-      alpha = delta/np.vdot(d,q)
-      x = x + alpha*d
-      
-      if divmod(numiter+1,50)[1] == 0:
-        # r = b - Aux*x
-        #if (implicit), r = b - A(x);  else  r = b - A*x;  end
-        if implicit:
-            r = b - A(x)
-        else:
-            r = b - np.dot(A,x)
-      else:
-        r = r - alpha*q
-      #end
-      
-      deltaold = delta;
-      delta = np.vdot(r,r)
-      beta = delta/deltaold;
-      d = r + beta*d
-      numiter = numiter + 1
-      if (math.sqrt(delta/delta0) < bestres):
-        bestx = x.copy()
-        bestres = math.sqrt(delta/delta0)
-      #end    
-      
-      if ((verbose) and (divmod(numiter,verbose)[1]==0)):
-        #disp(sprintf('cg: Iter = #d, Best residual = #8.3e, Current residual = #8.3e', ...
-        #  numiter, bestres, sqrt(delta/delta0)));
-        print 'cg: Iter = ',numiter,', Best residual = ',bestres,', Current residual = ',math.sqrt(delta/delta0)
-      #end
-      
-    #end
-    
-    if (verbose):
-      #disp(sprintf('cg: Iterations = #d, best residual = #14.8e', numiter, bestres));
-      print 'cg: Iterations = ',numiter,', best residual = ',bestres
-    #end
-    x = bestx.copy()
-    res = bestres
-    iter = numiter
-    
-    return x,res,iter
-
-
-
-#function [xp, up, niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter) 
-def l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter):
-    # Newton algorithm for log-barrier subproblems for l1 minimization
-    # with quadratic constraints.
-    #
-    # Usage: 
-    # [xp,up,niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, 
-    #                             newtontol, newtonmaxiter, cgtol, cgmaxiter)
-    #
-    # x0,u0 - starting points
-    #
-    # A - Either a handle to a function that takes an N vector and returns a K 
-    #     vector, or a KxN matrix.  If A is a function handle, the algorithm
-    #     operates in "largescale" mode, solving the Newton systems via the
-    #     Conjugate Gradients algorithm.
-    #
-    # At - Handle to a function that takes a K vector and returns an N vector.
-    #      If A is a KxN matrix, At is ignored.
-    #
-    # b - Kx1 vector of observations.
-    #
-    # epsilon - scalar, constraint relaxation parameter
-    #
-    # tau - Log barrier parameter.
-    #
-    # newtontol - Terminate when the Newton decrement is <= newtontol.
-    #         Default = 1e-3.
-    #
-    # newtonmaxiter - Maximum number of iterations.
-    #         Default = 50.
-    #
-    # cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
-    #     Default = 1e-8.
-    #
-    # cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
-    #     if A is a matrix.
-    #     Default = 200.
-    #
-    # Written by: Justin Romberg, Caltech
-    # Email: jrom@acm.caltech.edu
-    # Created: October 2005
-    #
-    
-    #---------------------
-    # Original Matlab code:
-    
-    ## check if the matrix A is implicit or explicit
-    #largescale = isa(A,'function_handle');
-    #
-    ## line search parameters
-    #alpha = 0.01;
-    #beta = 0.5;  
-    #
-    #if (~largescale), AtA = A'*A; end
-    #
-    ## initial point
-    #x = x0;
-    #u = u0;
-    #if (largescale), r = A(x) - b; else  r = A*x - b; end
-    #fu1 = x - u;
-    #fu2 = -x - u;
-    #fe = 1/2*(r'*r - epsilon^2);
-    #f = sum(u) - (1/tau)*(sum(log(-fu1)) + sum(log(-fu2)) + log(-fe));
-    #
-    #niter = 0;
-    #done = 0;
-    #while (~done)
-    #  
-    #  if (largescale), atr = At(r); else  atr = A'*r; end
-    #  
-    #  ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
-    #  ntgu = -tau - 1./fu1 - 1./fu2;
-    #  gradf = -(1/tau)*[ntgz; ntgu];
-    #  
-    #  sig11 = 1./fu1.^2 + 1./fu2.^2;
-    #  sig12 = -1./fu1.^2 + 1./fu2.^2;
-    #  sigx = sig11 - sig12.^2./sig11;
-    #    
-    #  w1p = ntgz - sig12./sig11.*ntgu;
-    #  if (largescale)
-    #    h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
-    #    [dx, cgres, cgiter] = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0);
-    #    if (cgres > 1/2)
-    #      disp('Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)');
-    #      xp = x;  up = u;
-    #      return
-    #    end
-    #    Adx = A(dx);
-    #  else
-    #    H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
-    #    opts.POSDEF = true; opts.SYM = true;
-    #    [dx,hcond] = linsolve(H11p, w1p, opts);
-    #    if (hcond < 1e-14)
-    #      disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
-    #      xp = x;  up = u;
-    #      return
-    #    end
-    #    Adx = A*dx;
-    #  end
-    #  du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
-    # 
-    #  # minimum step size that stays in the interior
-    #  ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
-    #  aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
-    #  smax = min(1,min([...
-    #    -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
-    #    (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
-    #    ]));
-    #  s = (0.99)*smax;
-    #  
-    #  # backtracking line search
-    #  suffdec = 0;
-    #  backiter = 0;
-    #  while (~suffdec)
-    #    xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
-    #    fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
-    #    fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
-    #    flin = f + alpha*s*(gradf'*[dx; du]);
-    #    suffdec = (fp <= flin);
-    #    s = beta*s;
-    #    backiter = backiter + 1;
-    #    if (backiter > 32)
-    #      disp('Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)');
-    #      xp = x;  up = u;
-    #      return
-    #    end
-    #  end
-    #  
-    #  # set up for next iteration
-    #  x = xp; u = up;  r = rp;
-    #  fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
-    #  
-    #  lambda2 = -(gradf'*[dx; du]);
-    #  stepsize = s*norm([dx; du]);
-    #  niter = niter + 1;
-    #  done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
-    #  
-    #  disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
-    #    niter, f, lambda2/2, stepsize));
-    #  if (largescale)
-    #    disp(sprintf('                CG Res = #8.3e, CG Iter = #d', cgres, cgiter));
-    #  else
-    #    disp(sprintf('                  H11p condition number = #8.3e', hcond));
-    #  end
-    #      
-    #end
-    
-    # End of original Matlab code
-    #----------------------------
-    
-    # check if the matrix A is implicit or explicit
-    #largescale = isa(A,'function_handle');
-    if hasattr(A, '__call__'):
-        largescale = True
-    else:
-        largescale = False    
-    
-    # line search parameters
-    alpha = 0.01
-    beta = 0.5
-    
-    #if (~largescale), AtA = A'*A; end
-    if not largescale:
-        AtA = np.dot(A.T,A)
-    
-    # initial point
-    x = x0.copy()
-    u = u0.copy()
-    #if (largescale), r = A(x) - b; else  r = A*x - b; end
-    if largescale:
-        r = A(x) - b
-    else:
-        r = np.dot(A,x) - b
-        
-    fu1 = x - u
-    fu2 = -x - u
-    fe = 0.5*(np.vdot(r,r) - epsilon**2)
-    f = u.sum() - (1.0/tau)*(np.log(-fu1).sum() + np.log(-fu2).sum() + math.log(-fe))
-    
-    niter = 0
-    done = 0
-    while not done:
-      
-      #if (largescale), atr = At(r); else  atr = A'*r; end
-      if largescale:
-          atr = At(r)
-      else:
-          atr = np.dot(A.T,r)
-      
-      #ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
-      ntgz = 1.0/fu1 - 1.0/fu2 + 1.0/fe*atr
-      #ntgu = -tau - 1./fu1 - 1./fu2;
-      ntgu = -tau - 1.0/fu1 - 1.0/fu2
-      #gradf = -(1/tau)*[ntgz; ntgu];
-      gradf = -(1.0/tau)*np.concatenate((ntgz, ntgu),0)
-      
-      #sig11 = 1./fu1.^2 + 1./fu2.^2;
-      sig11 = 1.0/(fu1**2) + 1.0/(fu2**2)
-      #sig12 = -1./fu1.^2 + 1./fu2.^2;
-      sig12 = -1.0/(fu1**2) + 1.0/(fu2**2)
-      #sigx = sig11 - sig12.^2./sig11;
-      sigx = sig11 - (sig12**2)/sig11
-        
-      #w1p = ntgz - sig12./sig11.*ntgu;
-      w1p = ntgz - sig12/sig11*ntgu
-      if largescale:
-        #h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
-        h11pfun = lambda z: sigx*z - (1.0/fe)*At(A(z)) + 1.0/(fe**2)*np.dot(np.dot(atr.T,z),atr)
-        dx,cgres,cgiter = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0)
-        if (cgres > 0.5):
-          print 'Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)'
-          xp = x.copy()
-          up = u.copy()
-          return xp,up,niter
-        #end
-        Adx = A(dx)
-      else:
-        #H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
-        H11p = np.diag(sigx) - (1.0/fe)*AtA + (1.0/fe)**2*np.outer(atr,atr)
-        #opts.POSDEF = true; opts.SYM = true;
-        #[dx,hcond] = linsolve(H11p, w1p, opts);
-        #if (hcond < 1e-14)
-        #  disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
-        #  xp = x;  up = u;
-        #  return
-        #end
-        try:
-            dx = scipy.linalg.solve(H11p, w1p, sym_pos=True)
-            hcond = 1.0/scipy.linalg.cond(H11p)
-        except scipy.linalg.LinAlgError:
-            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
-            xp = x.copy()
-            up = u.copy()
-            return xp,up,niter
-        if hcond < 1e-14:
-            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
-            xp = x.copy()
-            up = u.copy()
-            return xp,up,niter
-        
-        #Adx = A*dx;
-        Adx = np.dot(A,dx)
-      #end
-      #du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
-      du = (1.0/sig11)*ntgu - (sig12/sig11)*dx;
-     
-      # minimum step size that stays in the interior
-      #ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
-      ifu1 = np.nonzero((dx-du)>0)
-      ifu2 = np.nonzero((-dx-du)>0)
-      #aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
-      aqe = np.dot(Adx.T,Adx)
-      bqe = 2*np.dot(r.T,Adx)
-      cqe = np.vdot(r,r) - epsilon**2
-      #smax = min(1,min([...
-      #  -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
-      #  (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
-      #  ]));
-      smax = min(1,np.concatenate((-fu1[ifu1]/(dx[ifu1]-du[ifu1]) , -fu2[ifu2]/(-dx[ifu2]-du[ifu2]) , np.array([(-bqe + math.sqrt(bqe**2-4*aqe*cqe))/(2*aqe)])),0).min())
-        
-      s = (0.99)*smax
-      
-      # backtracking line search
-      suffdec = 0
-      backiter = 0
-      while not suffdec:
-        #xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
-        xp = x + s*dx
-        up = u + s*du
-        rp = r + s*Adx
-        #fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
-        fu1p = xp - up
-        fu2p = -xp - up
-        fep = 0.5*(np.vdot(rp,rp) - epsilon**2)
-        #fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
-        fp = up.sum() - (1.0/tau)*(np.log(-fu1p).sum() + np.log(-fu2p).sum() + math.log(-fep))
-        #flin = f + alpha*s*(gradf'*[dx; du]);
-        flin = f + alpha*s*np.dot(gradf.T , np.concatenate((dx,du),0))
-        #suffdec = (fp <= flin);
-        if fp <= flin:
-            suffdec = True
-        else:
-            suffdec = False
-        
-        s = beta*s
-        backiter = backiter + 1
-        if (backiter > 32):
-          print 'Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)'
-          xp = x.copy()
-          up = u.copy()
-          return xp,up,niter
-        #end
-      #end
-      
-      # set up for next iteration
-      #x = xp; u = up;  r = rp;
-      x = xp.copy()
-      u = up.copy()
-      r = rp.copy()
-      #fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
-      fu1 = fu1p.copy()
-      fu2 = fu2p.copy()
-      fe = fep
-      f = fp
-      
-      #lambda2 = -(gradf'*[dx; du]);
-      lambda2 = -np.dot(gradf , np.concatenate((dx,du),0))
-      #stepsize = s*norm([dx; du]);
-      stepsize = s * np.linalg.norm(np.concatenate((dx,du),0))
-      niter = niter + 1
-      #done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
-      if lambda2/2.0 < newtontol or niter >= newtonmaxiter:
-          done = 1
-      else:
-          done = 0
-      
-      #disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
-      print 'Newton iter = ',niter,', Functional = ',f,', Newton decrement = ',lambda2/2.0,', Stepsize = ',stepsize
-
-      if largescale:
-        print '                CG Res = ',cgres,', CG Iter = ',cgiter
-      else:
-        print '                  H11p condition number = ',hcond
-      #end
-          
-    #end
-    return xp,up,niter
-
-#function xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
-def l1qc_logbarrier(x0, A, At, b, epsilon, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200):
-    # Solve quadratically constrained l1 minimization:
-    # min ||x||_1   s.t.  ||Ax - b||_2 <= \epsilon
-    #
-    # Reformulate as the second-order cone program
-    # min_{x,u}  sum(u)   s.t.    x - u <= 0,
-    #                            -x - u <= 0,
-    #      1/2(||Ax-b||^2 - \epsilon^2) <= 0
-    # and use a log barrier algorithm.
-    #
-    # Usage:  xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
-    #
-    # x0 - Nx1 vector, initial point.
-    #
-    # A - Either a handle to a function that takes an N vector and returns a K 
-    #     vector, or a KxN matrix.  If A is a function handle, the algorithm
-    #     operates in "largescale" mode, solving the Newton systems via the
-    #     Conjugate Gradients algorithm.
-    #
-    # At - Handle to a function that takes a K vector and returns an N vector.
-    #      If A is a KxN matrix, At is ignored.
-    #
-    # b - Kx1 vector of observations.
-    #
-    # epsilon - scalar, constraint relaxation parameter
-    #
-    # lbtol - The log barrier algorithm terminates when the duality gap <= lbtol.
-    #         Also, the number of log barrier iterations is completely
-    #         determined by lbtol.
-    #         Default = 1e-3.
-    #
-    # mu - Factor by which to increase the barrier constant at each iteration.
-    #      Default = 10.
-    #
-    # cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
-    #     Default = 1e-8.
-    #
-    # cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
-    #     if A is a matrix.
-    #     Default = 200.
-    #
-    # Written by: Justin Romberg, Caltech
-    # Email: jrom@acm.caltech.edu
-    # Created: October 2005
-    #
-
-    #---------------------
-    # Original Matlab code:
-    
-    #largescale = isa(A,'function_handle');
-    #
-    #if (nargin < 6), lbtol = 1e-3; end
-    #if (nargin < 7), mu = 10; end
-    #if (nargin < 8), cgtol = 1e-8; end
-    #if (nargin < 9), cgmaxiter = 200; end
-    #
-    #newtontol = lbtol;
-    #newtonmaxiter = 50;
-    #
-    #N = length(x0);
-    #
-    ## starting point --- make sure that it is feasible
-    #if (largescale)
-    #  if (norm(A(x0)-b) > epsilon)
-    #    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
-    #    AAt = @(z) A(At(z));
-    #    w = cgsolve(AAt, b, cgtol, cgmaxiter, 0);
-    #    if (cgres > 1/2)
-    #      disp('A*At is ill-conditioned: cannot find starting point');
-    #      xp = x0;
-    #      return;
-    #    end
-    #    x0 = At(w);
-    #  end
-    #else
-    #  if (norm(A*x0-b) > epsilon)
-    #    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
-    #    opts.POSDEF = true; opts.SYM = true;
-    #    [w, hcond] = linsolve(A*A', b, opts);
-    #    if (hcond < 1e-14)
-    #      disp('A*At is ill-conditioned: cannot find starting point');
-    #      xp = x0;
-    #      return;
-    #    end
-    #    x0 = A'*w;
-    #  end  
-    #end
-    #x = x0;
-    #u = (0.95)*abs(x0) + (0.10)*max(abs(x0));
-    #
-    #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
-    #
-    ## choose initial value of tau so that the duality gap after the first
-    ## step will be about the origial norm
-    #tau = max((2*N+1)/sum(abs(x0)), 1);
-    #                                                                                                                          
-    #lbiter = ceil((log(2*N+1)-log(lbtol)-log(tau))/log(mu));
-    #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
-    #
-    #totaliter = 0;
-    #
-    ## Added by Nic
-    #if lbiter == 0
-    #    xp = zeros(size(x0));
-    #end
-    #
-    #for ii = 1:lbiter
-    #
-    #  [xp, up, ntiter] = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter);
-    #  totaliter = totaliter + ntiter;
-    #  
-    #  disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
-    #    ii, sum(abs(xp)), sum(up), tau, totaliter));
-    #  
-    #  x = xp;
-    #  u = up;
-    # 
-    #  tau = mu*tau;
-    #  
-    #end
-    #
-    # End of original Matlab code
-    #----------------------------
-    
-    #largescale = isa(A,'function_handle');
-    if hasattr(A, '__call__'):
-        largescale = True
-    else:
-        largescale = False
-    
-    #    if (nargin < 6), lbtol = 1e-3; end
-    #    if (nargin < 7), mu = 10; end
-    #    if (nargin < 8), cgtol = 1e-8; end
-    #    if (nargin < 9), cgmaxiter = 200; end
-    # Nic: added them as optional parameters
-    
-    newtontol = lbtol
-    newtonmaxiter = 50
-    
-    #N = length(x0);
-    N = x0.size
-    
-    # starting point --- make sure that it is feasible
-    if largescale:
-      if np.linalg.norm(A(x0) - b) > epsilon:
-        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
-        #AAt = @(z) A(At(z));
-        AAt = lambda z: A(At(z))
-        # TODO: implement cgsolve
-        w,cgres,cgiter = cgsolve(AAt, b, cgtol, cgmaxiter, 0)
-        if (cgres > 0.5):
-          print 'A*At is ill-conditioned: cannot find starting point'
-          xp = x0.copy()
-          return xp
-        #end
-        x0 = At(w)
-      #end
-    else:
-      if np.linalg.norm( np.dot(A,x0) - b ) > epsilon:
-        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
-        #opts.POSDEF = true; opts.SYM = true;
-        #[w, hcond] = linsolve(A*A', b, opts);
-        #if (hcond < 1e-14)
-        #  disp('A*At is ill-conditioned: cannot find starting point');
-        #  xp = x0;
-        #  return;
-        #end
-        try:
-            w = scipy.linalg.solve(np.dot(A,A.T), b, sym_pos=True)
-            hcond = 1.0/scipy.linalg.cond(np.dot(A,A.T))
-        except scipy.linalg.LinAlgError:
-            print 'A*At is ill-conditioned: cannot find starting point'
-            xp = x0.copy()
-            return xp
-        if hcond < 1e-14:
-            print 'A*At is ill-conditioned: cannot find starting point'
-            xp = x0.copy()
-            return xp           
-        #x0 = A'*w;
-        x0 = np.dot(A.T, w)
-      #end  
-    #end
-    x = x0.copy()
-    u = (0.95)*np.abs(x0) + (0.10)*np.abs(x0).max()
-    
-    #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
-    print 'Original l1 norm = ',np.abs(x0).sum(),'original functional = ',u.sum()
-    
-    # choose initial value of tau so that the duality gap after the first
-    # step will be about the original norm
-    tau = max(((2*N+1)/np.abs(x0).sum()), 1)
-                                                                                                                              
-    lbiter = math.ceil((math.log(2*N+1)-math.log(lbtol)-math.log(tau))/math.log(mu))
-    #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
-    print 'Number of log barrier iterations = ',lbiter
-    
-    totaliter = 0
-    
-    # Added by Nic, to fix some crashing
-    if lbiter == 0:
-        xp = np.zeros(x0.size)
-    #end
-    
-    #for ii = 1:lbiter
-    for ii in np.arange(lbiter):
-    
-      xp,up,ntiter = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter)
-      totaliter = totaliter + ntiter
-      
-      #disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
-      #  ii, sum(abs(xp)), sum(up), tau, totaliter));
-      print 'Log barrier iter = ',ii,', l1 = ',np.abs(xp).sum(),', functional = ',up.sum(),', tau = ',tau,', total newton iter = ',totaliter
-      x = xp.copy()
-      u = up.copy()
-     
-      tau = mu*tau
-      
-    #end
-    return xp
-                   
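A hedged end-to-end sketch of calling l1qc_logbarrier from this module on a small synthetic recovery problem; all sizes and parameter values are illustrative assumptions, and since A is an explicit matrix the At argument is unused and passed as None:

import numpy as np
from l1qc import l1qc_logbarrier   # assumes BP/l1qc.py is importable as l1qc

np.random.seed(1)
N, K, T = 256, 96, 8                             # signal length, measurements, sparsity
A = np.random.randn(K, N) / np.sqrt(K)           # measurement matrix
x_true = np.zeros(N)
x_true[np.random.permutation(N)[:T]] = np.random.randn(T)
b = np.dot(A, x_true)                            # noiseless observations
epsilon = 1e-3                                   # constraint relaxation

x0 = np.dot(A.T, b)                              # usual starting point; projected to a feasible point internally
xp = l1qc_logbarrier(x0, A, None, b, epsilon, lbtol=1e-3, mu=10)
print np.linalg.norm(xp - x_true)                # expected to be small for this setup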
--- a/BP/l1qc_logbarrier.m	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-% l1qc_logbarrier.m
-%
-% Solve quadratically constrained l1 minimization:
-% min ||x||_1   s.t.  ||Ax - b||_2 <= \epsilon
-%
-% Reformulate as the second-order cone program
-% min_{x,u}  sum(u)   s.t.    x - u <= 0,
-%                            -x - u <= 0,
-%      1/2(||Ax-b||^2 - \epsilon^2) <= 0
-% and use a log barrier algorithm.
-%
-% Usage:  xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
-%
-% x0 - Nx1 vector, initial point.
-%
-% A - Either a handle to a function that takes an N vector and returns a K 
-%     vector, or a KxN matrix.  If A is a function handle, the algorithm
-%     operates in "largescale" mode, solving the Newton systems via the
-%     Conjugate Gradients algorithm.
-%
-% At - Handle to a function that takes a K vector and returns an N vector.
-%      If A is a KxN matrix, At is ignored.
-%
-% b - Kx1 vector of observations.
-%
-% epsilon - scalar, constraint relaxation parameter
-%
-% lbtol - The log barrier algorithm terminates when the duality gap <= lbtol.
-%         Also, the number of log barrier iterations is completely
-%         determined by lbtol.
-%         Default = 1e-3.
-%
-% mu - Factor by which to increase the barrier constant at each iteration.
-%      Default = 10.
-%
-% cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
-%     Default = 1e-8.
-%
-% cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
-%     if A is a matrix.
-%     Default = 200.
-%
-% Written by: Justin Romberg, Caltech
-% Email: jrom@acm.caltech.edu
-% Created: October 2005
-%
-
-function xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)  
-
-largescale = isa(A,'function_handle');
-
-if (nargin < 6), lbtol = 1e-3; end
-if (nargin < 7), mu = 10; end
-if (nargin < 8), cgtol = 1e-8; end
-if (nargin < 9), cgmaxiter = 200; end
-
-newtontol = lbtol;
-newtonmaxiter = 50;
-
-N = length(x0);
-
-% starting point --- make sure that it is feasible
-if (largescale)
-  if (norm(A(x0)-b) > epsilon)
-    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
-    AAt = @(z) A(At(z));
-    [w, cgres, cgiter] = cgsolve(AAt, b, cgtol, cgmaxiter, 0);
-    if (cgres > 1/2)
-      disp('A*At is ill-conditioned: cannot find starting point');
-      xp = x0;
-      return;
-    end
-    x0 = At(w);
-  end
-else
-  if (norm(A*x0-b) > epsilon)
-    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
-    opts.POSDEF = true; opts.SYM = true;
-    [w, hcond] = linsolve(A*A', b, opts);
-    if (hcond < 1e-14)
-      disp('A*At is ill-conditioned: cannot find starting point');
-      xp = x0;
-      return;
-    end
-    x0 = A'*w;
-  end  
-end
-x = x0;
-u = (0.95)*abs(x0) + (0.10)*max(abs(x0));
-
-disp(sprintf('Original l1 norm = %.3f, original functional = %.3f', sum(abs(x0)), sum(u)));
-
-% choose initial value of tau so that the duality gap after the first
-% step will be about the original norm
-tau = max((2*N+1)/sum(abs(x0)), 1);
-                                                                                                                          
-lbiter = ceil((log(2*N+1)-log(lbtol)-log(tau))/log(mu));
-disp(sprintf('Number of log barrier iterations = %d\n', lbiter));
-
-totaliter = 0;
-
-% Added by Nic
-if lbiter == 0
-    xp = zeros(size(x0));
-end
-
-for ii = 1:lbiter
-
-  [xp, up, ntiter] = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter);
-  totaliter = totaliter + ntiter;
-  
-  disp(sprintf('\nLog barrier iter = %d, l1 = %.3f, functional = %8.3f, tau = %8.3e, total newton iter = %d\n', ...
-    ii, sum(abs(xp)), sum(up), tau, totaliter));
-  
-  x = xp;
-  u = up;
- 
-  tau = mu*tau;
-  
-end
-                   
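As the header above notes, the number of log-barrier iterations is fixed by lbtol, mu and the initial tau: the duality gap after each outer round is roughly (2*N+1)/tau and tau is multiplied by mu every round, so lbiter is the smallest integer with (2*N+1)/(tau*mu^lbiter) <= lbtol. A small numeric check of the formula used in the code (the values are illustrative only):

import math
N, lbtol, mu = 256, 1e-3, 10.0
tau = 1.0    # worst case of max((2*N+1)/sum(abs(x0)), 1)
lbiter = math.ceil((math.log(2*N+1) - math.log(lbtol) - math.log(tau)) / math.log(mu))
print lbiter    # 6.0, i.e. six outer log-barrier iterations for these values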
--- a/BP/l1qc_newton.m	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,150 +0,0 @@
-% l1qc_newton.m
-%
-% Newton algorithm for log-barrier subproblems for l1 minimization
-% with quadratic constraints.
-%
-% Usage: 
-% [xp,up,niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, 
-%                             newtontol, newtonmaxiter, cgtol, cgmaxiter)
-%
-% x0,u0 - starting points
-%
-% A - Either a handle to a function that takes an N vector and returns a K 
-%     vector, or a KxN matrix.  If A is a function handle, the algorithm
-%     operates in "largescale" mode, solving the Newton systems via the
-%     Conjugate Gradients algorithm.
-%
-% At - Handle to a function that takes a K vector and returns an N vector.
-%      If A is a KxN matrix, At is ignored.
-%
-% b - Kx1 vector of observations.
-%
-% epsilon - scalar, constraint relaxation parameter
-%
-% tau - Log barrier parameter.
-%
-% newtontol - Terminate when the Newton decrement is <= newtontol.
-%         Default = 1e-3.
-%
-% newtonmaxiter - Maximum number of iterations.
-%         Default = 50.
-%
-% cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
-%     Default = 1e-8.
-%
-% cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
-%     if A is a matrix.
-%     Default = 200.
-%
-% Written by: Justin Romberg, Caltech
-% Email: jrom@acm.caltech.edu
-% Created: October 2005
-%
-
-
-function [xp, up, niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter) 
-
-% check if the matrix A is implicit or explicit
-largescale = isa(A,'function_handle');
-
-% line search parameters
-alpha = 0.01;
-beta = 0.5;  
-
-if (~largescale), AtA = A'*A; end
-
-% initial point
-x = x0;
-u = u0;
-if (largescale), r = A(x) - b; else  r = A*x - b; end
-fu1 = x - u;
-fu2 = -x - u;
-fe = 1/2*(r'*r - epsilon^2);
-f = sum(u) - (1/tau)*(sum(log(-fu1)) + sum(log(-fu2)) + log(-fe));
-
-niter = 0;
-done = 0;
-while (~done)
-  
-  if (largescale), atr = At(r); else  atr = A'*r; end
-  
-  ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
-  ntgu = -tau - 1./fu1 - 1./fu2;
-  gradf = -(1/tau)*[ntgz; ntgu];
-  
-  sig11 = 1./fu1.^2 + 1./fu2.^2;
-  sig12 = -1./fu1.^2 + 1./fu2.^2;
-  sigx = sig11 - sig12.^2./sig11;
-    
-  w1p = ntgz - sig12./sig11.*ntgu;
-  if (largescale)
-    h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
-    [dx, cgres, cgiter] = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0);
-    if (cgres > 1/2)
-      disp('Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)');
-      xp = x;  up = u;
-      return
-    end
-    Adx = A(dx);
-  else
-    H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
-    opts.POSDEF = true; opts.SYM = true;
-    [dx,hcond] = linsolve(H11p, w1p, opts);
-    if (hcond < 1e-14)
-      disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
-      xp = x;  up = u;
-      return
-    end
-    Adx = A*dx;
-  end
-  du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
- 
-  % minimum step size that stays in the interior
-  ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
-  aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
-  smax = min(1,min([...
-    -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
-    (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
-    ]));
-  s = (0.99)*smax;
-  
-  % backtracking line search
-  suffdec = 0;
-  backiter = 0;
-  while (~suffdec)
-    xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
-    fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
-    fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
-    flin = f + alpha*s*(gradf'*[dx; du]);
-    suffdec = (fp <= flin);
-    s = beta*s;
-    backiter = backiter + 1;
-    if (backiter > 32)
-      disp('Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)');
-      xp = x;  up = u;
-      return
-    end
-  end
-  
-  % set up for next iteration
-  x = xp; u = up;  r = rp;
-  fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
-  
-  lambda2 = -(gradf'*[dx; du]);
-  stepsize = s*norm([dx; du]);
-  niter = niter + 1;
-  done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
-  
-  disp(sprintf('Newton iter = %d, Functional = %8.3f, Newton decrement = %8.3f, Stepsize = %8.3e', ...
-    niter, f, lambda2/2, stepsize));
-  if (largescale)
-    disp(sprintf('                CG Res = %8.3e, CG Iter = %d', cgres, cgiter));
-  else
-    disp(sprintf('                  H11p condition number = %8.3e', hcond));
-  end
-      
-end
-
-
-
-
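In "largescale" mode A and At are supplied as callables and each Newton system is solved iteratively with cgsolve rather than by forming A'*A. A minimal sketch of that calling convention in Python (the dense random operator is only an assumed stand-in for a fast implicit transform):

import numpy as np

np.random.seed(2)
K, N = 64, 128
Phi = np.random.randn(K, N) / np.sqrt(K)

A  = lambda z: np.dot(Phi, z)      # maps an N vector to a K vector
At = lambda z: np.dot(Phi.T, z)    # maps a K vector back to an N vector

# passed like this, the solver never builds A'*A; every Newton step goes
# through cgsolve, controlled by cgtol and cgmaxiter, e.g.
# xp = l1qc_logbarrier(x0, A, At, b, epsilon, cgtol=1e-8, cgmaxiter=200)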
--- a/OMP/greed_omp_qr.m	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-function [s, err_mse, iter_time]=greed_omp_qr(x,A,m,varargin)
-% greed_omp_qr: Orthogonal Matching Pursuit algorithm based on QR
-% factorisation
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Usage
-% [s, err_mse, iter_time]=greed_omp_qr(x,P,m,'option_name','option_value')
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Input
-%   Mandatory:
-%               x   Observation vector to be decomposed
-%               P   Either:
-%                       1) An nxm matrix (n must be dimension of x)
-%                       2) A function handle (type "help function_format" 
-%                          for more information)
-%                          Also requires specification of P_trans option.
-%                       3) An object handle (type "help object_format" for 
-%                          more information)
-%               m   length of s 
-%
-%   Possible additional options:
-%   (specify as many as you want using 'option_name','option_value' pairs)
-%   See below for explanation of options:
-%__________________________________________________________________________
-%   option_name    |     available option_values                | default
-%--------------------------------------------------------------------------
-%   stopCrit       | M, corr, mse, mse_change                   | M
-%   stopTol        | number (see below)                         | n/4
-%   P_trans        | function_handle (see below)                | 
-%   maxIter        | positive integer (see below)               | n
-%   verbose        | true, false                                | false
-%   start_val      | vector of length m                         | zeros
-%
-%   Available stopping criteria :
-%               M           -   Extracts exactly M = stopTol elements.
-%               corr        -   Stops when maximum correlation between
-%                               residual and atoms is below stopTol value.
-%               mse         -   Stops when mean squared error of residual 
-%                               is below stopTol value.
-%               mse_change  -   Stops when the change in the mean squared 
-%                               error falls below stopTol value.
-%
-%   stopTol: Value for stopping criterion.
-%
-%   P_trans: If P is a function handle, then P_trans has to be specified and 
-%            must be a function handle. 
-%
-%   maxIter: Maximum number of allowed iterations.
-%
-%   verbose: Logical value to allow algorithm progress to be displayed.
-%
-%   start_val: Allows algorithms to start from partial solution.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Outputs
-%    s              Solution vector 
-%    err_mse        Vector containing mse of approximation error for each 
-%                   iteration
-%    iter_time      Vector containing computation times for each iteration
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Description
-%   greed_omp_qr performs a greedy signal decomposition. 
-%   In each iteration a new element is selected depending on the inner
-%   product between the current residual and columns in P.
-%   The non-zero elements of s are approximated by orthogonally projecting 
-%   x onto the selected elements in each iteration.
-%   The algorithm uses QR decomposition.
-%
-% See Also
-%   greed_omp_chol, greed_omp_cg, greed_omp_cgp, greed_omp_pinv, 
-%   greed_omp_linsolve, greed_gp, greed_nomp
-%
-% Copyright (c) 2007 Thomas Blumensath
-%
-% The University of Edinburgh
-% Email: thomas.blumensath@ed.ac.uk
-% Comments and bug reports welcome
-%
-% This file is part of sparsity Version 0.1
-% Created: April 2007
-%
-% Part of this toolbox was developed with the support of EPSRC Grant
-% D000246/1
-%
-% Please read COPYRIGHT.m for terms and conditions.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                    Default values and initialisation
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-[n1 n2]=size(x);
-if n2 == 1
-    n=n1;
-elseif n1 == 1
-    x=x';
-    n=n2;
-else
-   display('x must be a vector.');
-   return
-end
-    
-sigsize     = x'*x/n;
-initial_given=0;
-err_mse     = [];
-iter_time   = [];
-STOPCRIT    = 'M';
-STOPTOL     = ceil(n/4);
-MAXITER     = n;
-verbose     = false;
-s_initial   = zeros(m,1);
-
-
-if verbose
-   display('Initialising...') 
-end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                           Output variables
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-switch nargout 
-    case 3
-        comp_err=true;
-        comp_time=true;
-    case 2 
-        comp_err=true;
-        comp_time=false;
-    case 1
-        comp_err=false;
-        comp_time=false;
-    case 0
-        error('Please assign output variable.')
-    otherwise
-        error('Too many output arguments specified')
-end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                       Look through options
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Put option into nice format
-Options={};
-OS=nargin-3;
-c=1;
-for i=1:OS
-    if isa(varargin{i},'cell')
-        CellSize=length(varargin{i});
-        ThisCell=varargin{i};
-        for j=1:CellSize
-            Options{c}=ThisCell{j};
-            c=c+1;
-        end
-    else
-        Options{c}=varargin{i};
-        c=c+1;
-    end
-end
-OS=length(Options);
-if rem(OS,2)
-   error('Something is wrong with argument name and argument value pairs.') 
-end
-
-for i=1:2:OS
-   switch Options{i}
-        case {'stopCrit'}
-            if (strmatch(Options{i+1},{'M'; 'corr'; 'mse'; 'mse_change'},'exact'));
-                STOPCRIT    = Options{i+1};  
-            else error('stopCrit must be char string [M, corr, mse, mse_change]. Exiting.'); end 
-        case {'stopTol'}
-            if isa(Options{i+1},'numeric') ; STOPTOL     = Options{i+1};   
-            else error('stopTol must be number. Exiting.'); end
-        case {'P_trans'} 
-            if isa(Options{i+1},'function_handle'); Pt = Options{i+1};   
-            else error('P_trans must be function_handle. Exiting.'); end
-        case {'maxIter'}
-            if isa(Options{i+1},'numeric'); MAXITER     = Options{i+1};             
-            else error('maxIter must be a number. Exiting.'); end
-        case {'verbose'}
-            if isa(Options{i+1},'logical'); verbose     = Options{i+1};   
-            else error('verbose must be a logical. Exiting.'); end 
-        case {'start_val'}
-            if isa(Options{i+1},'numeric') & length(Options{i+1}) == m ;
-                s_initial     = Options{i+1};   
-                initial_given=1;
-            else error('start_val must be a vector of length m. Exiting.'); end
-        otherwise
-            error('Unrecognised option. Exiting.') 
-   end
-end
-
-
-
-if strcmp(STOPCRIT,'M') 
-    maxM=STOPTOL;
-else
-    maxM=MAXITER;
-end
-
-if nargout >=2
-    err_mse = zeros(maxM,1);
-end
-if nargout ==3
-    iter_time = zeros(maxM,1);
-end
-
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                        Make P and Pt functions
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-if          isa(A,'float')      P =@(z) A*z;  Pt =@(z) A'*z;
-elseif      isobject(A)         P =@(z) A*z;  Pt =@(z) A'*z;
-elseif      isa(A,'function_handle') 
-    try
-        if          isa(Pt,'function_handle'); P=A;
-        else        error('If P is a function handle, Pt also needs to be a function handle. Exiting.'); end
-    catch error('If P is a function handle, Pt needs to be specified. Exiting.'); end
-else        error('P is of unsupported type. Use matrix, function_handle or object. Exiting.'); end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                 Random Check to see if dictionary is normalised 
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-        % Commented by Nic, 13 Sept 2011
-        % Don't want any slow text output
-%         mask=zeros(m,1);
-%         mask(ceil(rand*m))=1;
-%         nP=norm(P(mask));
-%         if abs(1-nP)>1e-3;
-%             display('Dictionary appears not to have unit norm columns.')
-%         end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%              Check if we have enough memory and initialise 
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-        
-
-        try Q=zeros(n,maxM);
-        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
-        end 
-        try R=zeros(maxM);
-        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
-        end
-
-
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                        Do we start from zero or not?
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-if initial_given ==1;
-    IN          = find(s_initial);
-    if ~isempty(IN)
-        Residual    = x-P(s_initial);
-        lengthIN=length(IN);
-        z=[];
-        for k=1:length(IN)
-            % Extract new element
-             mask=zeros(m,1);
-             mask(IN(k))=1;
-             new_element=P(mask);
-
-            % Orthogonalise new element 
-             qP=Q(:,1:k-1)'*new_element;
-             q=new_element-Q(:,1:k-1)*(qP);
-
-             nq=norm(q);
-             q=q/nq;
-            % Update QR factorisation 
-             R(1:k-1,k)=qP;
-             R(k,k)=nq;
-             Q(:,k)=q;
-
-             z(k)=q'*x;
-        end
-        s           = s_initial;
-        Residual=x-Q(:,k)*z;
-        oldERR      = Residual'*Residual/n;
-    else
-    	IN          = [];
-        Residual    = x;
-        s           = s_initial;
-        sigsize     = x'*x/n;
-        oldERR      = sigsize;
-        k=0;
-        z=[];
-    end
-    
-else
-    IN          = [];
-    Residual    = x;
-    s           = s_initial;
-    sigsize     = x'*x/n;
-    oldERR      = sigsize;
-    k=0;
-    z=[];
-end
-
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                        Main algorithm
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-if verbose
-   display('Main iterations...') 
-end
-tic
-t=0;
-DR=Pt(Residual);
-done = 0;
-iter=1;
-
-%Nic: find norms of dictionary atoms, for RandOMP
-% for i = 1:m
-%     norms(i) = norm(P([zeros(i-1,1) ; 1 ; zeros(m-i,1)]));
-% end
-
-while ~done
-    
-     % Select new element
-     DR(IN)=0;
-     % Nic: replace selection with random variable
-     % i.e. Randomized OMP!!
-     % DON'T FORGET ABOUT THIS!!
-     [v I]=max(abs(DR));
-     %I = randp(exp(abs(DR).^2 ./ (norms.^2)'), [1 1]);
-     IN=[IN I];
-
-    
-     k=k+1;
-     % Extract new element
-     mask=zeros(m,1);
-     mask(IN(k))=1;
-     new_element=P(mask);
-
-    % Orthogonalise new element 
-     qP=Q(:,1:k-1)'*new_element;
-     q=new_element-Q(:,1:k-1)*(qP);
-
-     nq=norm(q);
-     q=q/nq;
-    % Update QR factorisation 
-     R(1:k-1,k)=qP;
-     R(k,k)=nq;
-     Q(:,k)=q;
-
-     z(k)=q'*x;
-   
-    % New residual 
-     Residual=Residual-q*(z(k));
-     DR=Pt(Residual);
-     
-     ERR=Residual'*Residual/n;
-     if comp_err
-         err_mse(iter)=ERR;
-     end
-     
-     if comp_time
-         iter_time(iter)=toc;
-     end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                        Are we done yet?
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-     
-     if strcmp(STOPCRIT,'M')
-         if iter >= STOPTOL
-             done =1;
-         elseif verbose && toc-t>10
-            display(sprintf('Iteration %i. --- %i iterations to go',iter ,STOPTOL-iter)) 
-            t=toc;
-         end
-    elseif strcmp(STOPCRIT,'mse')
-         if comp_err
-            if err_mse(iter)<STOPTOL;
-                done = 1; 
-            elseif verbose && toc-t>10
-                display(sprintf('Iteration %i. --- %i mse',iter ,err_mse(iter))) 
-                t=toc;
-            end
-         else
-             if ERR<STOPTOL;
-                done = 1; 
-             elseif verbose && toc-t>10
-                display(sprintf('Iteration %i. --- %i mse',iter ,ERR)) 
-                t=toc;
-             end
-         end
-     elseif strcmp(STOPCRIT,'mse_change') && iter >=2
-         if comp_err && iter >=2
-              if ((err_mse(iter-1)-err_mse(iter))/sigsize <STOPTOL);
-                done = 1; 
-             elseif verbose && toc-t>10
-                display(sprintf('Iteration %i. --- %i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
-                t=toc;
-             end
-         else
-             if ((oldERR - ERR)/sigsize < STOPTOL);
-                done = 1; 
-             elseif verbose && toc-t>10
-                display(sprintf('Iteration %i. --- %i mse change',iter ,(oldERR - ERR)/sigsize)) 
-                t=toc;
-             end
-         end
-     elseif strcmp(STOPCRIT,'corr') 
-          if max(abs(DR)) < STOPTOL;
-             done = 1; 
-          elseif verbose && toc-t>10
-                display(sprintf('Iteration %i. --- %i corr',iter ,max(abs(DR)))) 
-                t=toc;
-          end
-     end
-     
-    % Also stop if residual gets too small or maxIter reached
-     if comp_err
-         if err_mse(iter)<1e-16
-             display('Stopping. Exact signal representation found!')
-             done=1;
-         end
-     else
-
-
-         if iter>1
-             if ERR<1e-16
-                 display('Stopping. Exact signal representation found!')
-                 done=1;
-             end
-         end
-     end
-
-     if iter >= MAXITER
-         display('Stopping. Maximum number of iterations reached!')
-         done = 1; 
-     end
-     
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                    If not done, take another round
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-   
-     if ~done
-        iter=iter+1;
-        oldERR=ERR;
-     end
-end
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%            Now we can solve for s by back-substitution
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
- s(IN)=R(1:k,1:k)\z(1:k)';
- 
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%                  Only return as many elements as iterations
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-if nargout >=2
-    err_mse = err_mse(1:iter);
-end
-if nargout ==3
-    iter_time = iter_time(1:iter);
-end
-if verbose
-   display('Done') 
-end
-
-% Change history
-%
-% 8 of February: Algorithm no longer stops if the dictionary is not normalised.
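A hedged usage sketch for the Python port that follows (greed_omp_qr in OMP/omp_QR.py); the option dictionary keys mirror the option names documented above, the explicit-matrix form of A is assumed to be accepted as in the Matlab version, and the three-value return is assumed to mirror [s, err_mse, iter_time]:

import numpy as np
from omp_QR import greed_omp_qr   # assumes OMP/omp_QR.py is importable as omp_QR

np.random.seed(3)
n, m, k = 64, 128, 5
A = np.random.randn(n, m)
A = A / np.sqrt((A**2).sum(0))           # unit-norm columns, as OMP expects
s_true = np.zeros(m)
s_true[np.random.permutation(m)[:k]] = np.random.randn(k)
x = np.dot(A, s_true)

s, err_mse, iter_time = greed_omp_qr(x, A, m, opts={'stopCrit': 'M', 'stopTol': k, 'nargout': 3})
print np.linalg.norm(s - s_true)         # close to zero when recovery succeeds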
--- a/OMP/omp_QR.py	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,842 +0,0 @@
-import numpy as np
-import scipy.linalg
-import time
-import math
-
-
-#function [s, err_mse, iter_time]=greed_omp_qr(x,A,m,varargin)
-def greed_omp_qr(x,A,m,opts={}):
-# greed_omp_qr: Orthogonal Matching Pursuit algorithm based on QR
-# factorisation
-# Nic: translated to Python on 19.10.2011. Original Matlab Code by Thomas Blumensath
-###########################################################################
-# Usage
-# [s, err_mse, iter_time]=greed_omp_qr(x,P,m,'option_name','option_value')
-###########################################################################
-###########################################################################
-# Input
-#   Mandatory:
-#               x   Observation vector to be decomposed
-#               P   Either:
-#                       1) An nxm matrix (n must be dimension of x)
-#                       2) A function handle (type "help function_format" 
-#                          for more information)
-#                          Also requires specification of P_trans option.
-#                       3) An object handle (type "help object_format" for 
-#                          more information)
-#               m   length of s 
-#
-#   Possible additional options:
-#   (specify as many as you want using 'option_name','option_value' pairs)
-#   See below for explanation of options:
-#__________________________________________________________________________
-#   option_name    |     available option_values                | default
-#--------------------------------------------------------------------------
-#   stopCrit       | M, corr, mse, mse_change                   | M
-#   stopTol        | number (see below)                         | n/4
-#   P_trans        | function_handle (see below)                | 
-#   maxIter        | positive integer (see below)               | n
-#   verbose        | true, false                                | false
-#   start_val      | vector of length m                         | zeros
-#
-#   Available stopping criteria :
-#               M           -   Extracts exactly M = stopTol elements.
-#               corr        -   Stops when maximum correlation between
-#                               residual and atoms is below stopTol value.
-#               mse         -   Stops when mean squared error of residual 
-#                               is below stopTol value.
-#               mse_change  -   Stops when the change in the mean squared 
-#                               error falls below stopTol value.
-#
-#   stopTol: Value for stopping criterion.
-#
-#   P_trans: If P is a function handle, then P_trans has to be specified and 
-#            must be a function handle. 
-#
-#   maxIter: Maximum number of allowed iterations.
-#
-#   verbose: Logical value to allow algorithm progress to be displayed.
-#
-#   start_val: Allows algorithms to start from partial solution.
-#
-###########################################################################
-# Outputs
-#    s              Solution vector 
-#    err_mse        Vector containing mse of approximation error for each 
-#                   iteration
-#    iter_time      Vector containing computation times for each iteration
-#
-###########################################################################
-# Description
-#   greed_omp_qr performs a greedy signal decomposition. 
-#   In each iteration a new element is selected depending on the inner
-#   product between the current residual and columns in P.
-#   The non-zero elements of s are approximated by orthogonally projecting 
-#   x onto the selected elements in each iteration.
-#   The algorithm uses QR decomposition.
-#
-# See Also
-#   greed_omp_chol, greed_omp_cg, greed_omp_cgp, greed_omp_pinv, 
-#   greed_omp_linsolve, greed_gp, greed_nomp
-#
-# Copyright (c) 2007 Thomas Blumensath
-#
-# The University of Edinburgh
-# Email: thomas.blumensath@ed.ac.uk
-# Comments and bug reports welcome
-#
-# This file is part of sparsity Version 0.1
-# Created: April 2007
-#
-# Part of this toolbox was developed with the support of EPSRC Grant
-# D000246/1
-#
-# Please read COPYRIGHT.m for terms and conditions.
-
-    ###########################################################################
-    #                    Default values and initialisation
-    ###########################################################################
-    #[n1 n2]=size(x);
-    #n1,n2 = x.shape
-    #if n2 == 1
-    #    n=n1;
-    #elseif n1 == 1
-    #    x=x';
-    #    n=n2;
-    #else
-    #   display('x must be a vector.');
-    #   return
-    #end
-    if x.ndim != 1:
-      print 'x must be a vector.'
-      return
-    n = x.size
-        
-    #sigsize     = x'*x/n;
-    sigsize     = np.vdot(x,x)/n;
-    initial_given = 0;
-    err_mse     = np.array([]);
-    iter_time   = np.array([]);
-    STOPCRIT    = 'M';
-    STOPTOL     = math.ceil(n/4.0);
-    MAXITER     = n;
-    verbose     = False;
-    s_initial   = np.zeros(m);
-    
-    if verbose:
-       print 'Initialising...'
-    #end
-    
-    ###########################################################################
-    #                           Output variables
-    ###########################################################################
-    #switch nargout 
-    #    case 3
-    #        comp_err=true;
-    #        comp_time=true;
-    #    case 2 
-    #        comp_err=true;
-    #        comp_time=false;
-    #    case 1
-    #        comp_err=false;
-    #        comp_time=false;
-    #    case 0
-    #        error('Please assign output variable.')
-    #    otherwise
-    #        error('Too many output arguments specified')
-    #end
-    if 'nargout' in opts:
-      if opts['nargout'] == 3:
-        comp_err  = True
-        comp_time = True
-      elif opts['nargout'] == 2:
-        comp_err  = True
-        comp_time = False
-      elif opts['nargout'] == 1:
-        comp_err  = False
-        comp_time = False
-      elif opts['nargout'] == 0:
-        print 'Please assign output variable.'
-        return
-      else:
-        print 'Too many output arguments specified'
-        return
-    else:
-        # If not given, make default nargout = 3
-        #  and add nargout to options
-        opts['nargout'] = 3
-        comp_err  = True
-        comp_time = True
-    
-    ###########################################################################
-    #                       Look through options
-    ###########################################################################
-    # Put option into nice format
-    #Options={};
-    #OS=nargin-3;
-    #c=1;
-    #for i=1:OS
-    #    if isa(varargin{i},'cell')
-    #        CellSize=length(varargin{i});
-    #        ThisCell=varargin{i};
-    #        for j=1:CellSize
-    #            Options{c}=ThisCell{j};
-    #            c=c+1;
-    #        end
-    #    else
-    #        Options{c}=varargin{i};
-    #        c=c+1;
-    #    end
-    #end
-    #OS=length(Options);
-    #if rem(OS,2)
-    #   error('Something is wrong with argument name and argument value pairs.') 
-    #end
-    #
-    #for i=1:2:OS
-    #   switch Options{i}
-    #        case {'stopCrit'}
-    #            if (strmatch(Options{i+1},{'M'; 'corr'; 'mse'; 'mse_change'},'exact'));
-    #                STOPCRIT    = Options{i+1};  
-    #            else error('stopCrit must be char string [M, corr, mse, mse_change]. Exiting.'); end 
-    #        case {'stopTol'}
-    #            if isa(Options{i+1},'numeric') ; STOPTOL     = Options{i+1};   
-    #            else error('stopTol must be number. Exiting.'); end
-    #        case {'P_trans'} 
-    #            if isa(Options{i+1},'function_handle'); Pt = Options{i+1};   
-    #            else error('P_trans must be function _handle. Exiting.'); end
-    #        case {'maxIter'}
-    #            if isa(Options{i+1},'numeric'); MAXITER     = Options{i+1};             
-    #            else error('maxIter must be a number. Exiting.'); end
-    #        case {'verbose'}
-    #            if isa(Options{i+1},'logical'); verbose     = Options{i+1};   
-    #            else error('verbose must be a logical. Exiting.'); end 
-    #        case {'start_val'}
-    #            if isa(Options{i+1},'numeric') & length(Options{i+1}) == m ;
-    #                s_initial     = Options{i+1};   
-    #                initial_given=1;
-    #            else error('start_val must be a vector of length m. Exiting.'); end
-    #        otherwise
-    #            error('Unrecognised option. Exiting.') 
-    #   end
-    #end
-    if 'stopCrit' in opts:
-        STOPCRIT = opts['stopCrit']
-    if 'stopTol' in opts:
-        if hasattr(opts['stopTol'], '__int__'):  # check if numeric
-            STOPTOL = opts['stopTol']
-        else:
-            raise TypeError('stopTol must be number. Exiting.')
-    if 'P_trans' in opts:
-        if hasattr(opts['P_trans'], '__call__'):  # check if function handle
-            Pt = opts['P_trans']
-        else:
-            raise TypeError('P_trans must be function _handle. Exiting.')
-    if 'maxIter' in opts:
-        if hasattr(opts['maxIter'], '__int__'):  # check if numeric
-            MAXITER = opts['maxIter']
-        else:
-            raise TypeError('maxIter must be a number. Exiting.')
-    if 'verbose' in opts:
-        # TODO: Should check here if is logical
-        verbose = opts['verbose']
-    if 'start_val' in opts:
-        # TODO: Should check here if is numeric
-        if opts['start_val'].size == m:
-            s_initial = opts['start_val']
-            initial_given = 1
-        else:
-            raise ValueError('start_val must be a vector of length m. Exiting.')
-    # Don't exit if unknown option is given, simply ignore it
-
-    #if strcmp(STOPCRIT,'M') 
-    #    maxM=STOPTOL;
-    #else
-    #    maxM=MAXITER;
-    #end
-    if STOPCRIT == 'M':
-        maxM = STOPTOL
-    else:
-        maxM = MAXITER
-    
-    #    if nargout >=2
-    #        err_mse = zeros(maxM,1);
-    #    end
-    #    if nargout ==3
-    #        iter_time = zeros(maxM,1);
-    #    end
-    if opts['nargout'] >= 2:
-        err_mse = np.zeros(maxM)
-    if opts['nargout'] == 3:
-        iter_time = np.zeros(maxM)
-    
-    ###########################################################################
-    #                        Make P and Pt functions
-    ###########################################################################
-    #if          isa(A,'float')      P =@(z) A*z;  Pt =@(z) A'*z;
-    #elseif      isobject(A)         P =@(z) A*z;  Pt =@(z) A'*z;
-    #elseif      isa(A,'function_handle') 
-    #    try
-    #        if          isa(Pt,'function_handle'); P=A;
-    #        else        error('If P is a function handle, Pt also needs to be a function handle. Exiting.'); end
-    #    catch error('If P is a function handle, Pt needs to be specified. Exiting.'); end
-    #else        error('P is of unsupported type. Use matrix, function_handle or object. Exiting.'); end
-    if hasattr(A, '__call__'):
-        if hasattr(Pt, '__call__'):
-            P = A
-        else:
-            raise TypeError('If P is a function handle, Pt also needs to be a function handle.')
-    else:
-        # TODO: should check here if A is matrix
-        P  = lambda z: np.dot(A,z)
-        Pt = lambda z: np.dot(A.T,z)
-        
-    ###########################################################################
-    #                 Random Check to see if dictionary is normalised 
-    ###########################################################################
-    #    mask=zeros(m,1);
-    #    mask(ceil(rand*m))=1;
-    #    nP=norm(P(mask));
-    #    if abs(1-nP)>1e-3;
-    #        display('Dictionary appears not to have unit norm columns.')
-    #    end
-    mask = np.zeros(m)
-    mask[int(math.floor(np.random.rand() * m))] = 1
-    nP = np.linalg.norm(P(mask))
-    if abs(1-nP) > 1e-3:
-        print 'Dictionary appears not to have unit norm columns.'
-    #end
-    
-    ###########################################################################
-    #              Check if we have enough memory and initialise 
-    ###########################################################################
-    #        try Q=zeros(n,maxM);
-    #        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
-    #        end 
-    #        try R=zeros(maxM);
-    #        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
-    #        end
-    try:
-        Q = np.zeros((n,maxM))
-    except:
-        print 'Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.'
-        raise
-    try:
-        R = np.zeros((maxM, maxM))
-    except:
-        print 'Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.'
-        raise
-        
-    ###########################################################################
-    #                        Do we start from zero or not?
-    ###########################################################################
-    #if initial_given ==1;
-    #    IN          = find(s_initial);
-    #    if ~isempty(IN)
-    #        Residual    = x-P(s_initial);
-    #        lengthIN=length(IN);
-    #        z=[];
-    #        for k=1:length(IN)
-    #            # Extract new element
-    #             mask=zeros(m,1);
-    #             mask(IN(k))=1;
-    #             new_element=P(mask);
-    #
-    #            # Orthogonalise new element 
-    #             qP=Q(:,1:k-1)'*new_element;
-    #             q=new_element-Q(:,1:k-1)*(qP);
-    #
-    #             nq=norm(q);
-    #             q=q/nq;
-    #            # Update QR factorisation 
-    #             R(1:k-1,k)=qP;
-    #             R(k,k)=nq;
-    #             Q(:,k)=q;
-    #
-    #             z(k)=q'*x;
-    #        end
-    #        s           = s_initial;
-    #        Residual=x-Q(:,k)*z;
-    #        oldERR      = Residual'*Residual/n;
-    #    else
-    #    	IN          = [];
-    #        Residual    = x;
-    #        s           = s_initial;
-    #        sigsize     = x'*x/n;
-    #        oldERR      = sigsize;
-    #        k=0;
-    #        z=[];
-    #    end
-    #    
-    #else
-    #    IN          = [];
-    #    Residual    = x;
-    #    s           = s_initial;
-    #    sigsize     = x'*x/n;
-    #    oldERR      = sigsize;
-    #    k=0;
-    #    z=[];
-    #end
-    if initial_given == 1:
-        #IN = find(s_initial);
-        IN = np.nonzero(s_initial)[0].tolist()
-        #if ~isempty(IN)
-        if len(IN) > 0:
-            Residual = x - P(s_initial)
-            lengthIN = len(IN)
-            z = []
-            #for k=1:length(IN)
-            for k in range(len(IN)):
-                # Extract new element
-                 mask = np.zeros(m)
-                 mask[IN[k]] = 1
-                 new_element = P(mask)
-                 
-                 # Orthogonalise new element 
-                 #qP=Q(:,1:k-1)'*new_element;
-                 if k-1 >= 0:
-                     qP = np.dot(Q[:,0:k].T , new_element)
-                     #q=new_element-Q(:,1:k-1)*(qP);
-                     q = new_element - np.dot(Q[:,0:k] , qP)
-                     
-                     nq = np.linalg.norm(q)
-                     q = q / nq
-                     # Update QR factorisation 
-                     R[0:k,k] = qP
-                     R[k,k] = nq
-                     Q[:,k] = q
-                 else:
-                     q = new_element
-                     
-                     nq = np.linalg.norm(q)
-                     q = q / nq
-                     # Update QR factorisation 
-                     R[k,k] = nq
-                     Q[:,k] = q
-                     
-                 z.append(np.vdot(q, x))
-            #end
-            s        = s_initial.copy()
-            # use all len(IN) filled columns of Q, not only the last one
-            Residual = x - np.dot(Q[:, 0:len(IN)], np.array(z))
-            oldERR   = np.vdot(Residual , Residual) / n;
-            # advance k to the next free column index (0-based), so the main
-            # loop below appends after the columns already filled here
-            k = k + 1
-        else:
-            #IN          = np.array([], dtype = int)
-            IN          = np.array([], dtype = int).tolist()
-            Residual    = x.copy()
-            s           = s_initial.copy()
-            sigsize     = np.vdot(x , x) / n
-            oldERR      = sigsize
-            k = 0
-            #z = np.array([])
-            z = []
-        #end
-        
-    else:
-        #IN          = np.array([], dtype = int)
-        IN          = np.array([], dtype = int).tolist()
-        Residual    = x.copy()
-        s           = s_initial.copy()
-        sigsize     = np.vdot(x , x) / n
-        oldERR      = sigsize
-        k = 0
-        #z = np.array([])
-        z = []
-    #end
-    
-    ###########################################################################
-    #                        Main algorithm
-    ###########################################################################
-    #    if verbose
-    #       display('Main iterations...') 
-    #    end
-    #    tic
-    #    t=0;
-    #    DR=Pt(Residual);
-    #    done = 0;
-    #    iter=1;
-    if verbose:
-       print 'Main iterations...'
-    tic = time.time()
-    t = 0
-    DR = Pt(Residual)
-    done = 0
-    iter = 1
-    
-    #while ~done
-    #    
-    #     # Select new element
-    #     DR(IN)=0;
-    #     # Nic: replace selection with random variable
-    #     # i.e. Randomized OMP!!
-    #     # DON'T FORGET ABOUT THIS!!
-    #     [v I]=max(abs(DR));
-    #     #I = randp(exp(abs(DR).^2 ./ (norms.^2)'), [1 1]);
-    #     IN=[IN I];
-    #
-    #    
-    #     k=k+1;
-    #     # Extract new element
-    #     mask=zeros(m,1);
-    #     mask(IN(k))=1;
-    #     new_element=P(mask);
-    #
-    #    # Orthogonalise new element 
-    #     qP=Q(:,1:k-1)'*new_element;
-    #     q=new_element-Q(:,1:k-1)*(qP);
-    #
-    #     nq=norm(q);
-    #     q=q/nq;
-    #    # Update QR factorisation 
-    #     R(1:k-1,k)=qP;
-    #     R(k,k)=nq;
-    #     Q(:,k)=q;
-    #
-    #     z(k)=q'*x;
-    #   
-    #    # New residual 
-    #     Residual=Residual-q*(z(k));
-    #     DR=Pt(Residual);
-    #     
-    #     ERR=Residual'*Residual/n;
-    #     if comp_err
-    #         err_mse(iter)=ERR;
-    #     end
-    #     
-    #     if comp_time
-    #         iter_time(iter)=toc;
-    #     end
-    #
-    ############################################################################
-    ##                        Are we done yet?
-    ############################################################################
-    #     
-    #     if strcmp(STOPCRIT,'M')
-    #         if iter >= STOPTOL
-    #             done =1;
-    #         elseif verbose && toc-t>10
-    #            display(sprintf('Iteration #i. --- #i iterations to go',iter ,STOPTOL-iter)) 
-    #            t=toc;
-    #         end
-    #    elseif strcmp(STOPCRIT,'mse')
-    #         if comp_err
-    #            if err_mse(iter)<STOPTOL;
-    #                done = 1; 
-    #            elseif verbose && toc-t>10
-    #                display(sprintf('Iteration #i. --- #i mse',iter ,err_mse(iter))) 
-    #                t=toc;
-    #            end
-    #         else
-    #             if ERR<STOPTOL;
-    #                done = 1; 
-    #             elseif verbose && toc-t>10
-    #                display(sprintf('Iteration #i. --- #i mse',iter ,ERR)) 
-    #                t=toc;
-    #             end
-    #         end
-    #     elseif strcmp(STOPCRIT,'mse_change') && iter >=2
-    #         if comp_err && iter >=2
-    #              if ((err_mse(iter-1)-err_mse(iter))/sigsize <STOPTOL);
-    #                done = 1; 
-    #             elseif verbose && toc-t>10
-    #                display(sprintf('Iteration #i. --- #i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
-    #                t=toc;
-    #             end
-    #         else
-    #             if ((oldERR - ERR)/sigsize < STOPTOL);
-    #                done = 1; 
-    #             elseif verbose && toc-t>10
-    #                display(sprintf('Iteration #i. --- #i mse change',iter ,(oldERR - ERR)/sigsize)) 
-    #                t=toc;
-    #             end
-    #         end
-    #     elseif strcmp(STOPCRIT,'corr') 
-    #          if max(abs(DR)) < STOPTOL;
-    #             done = 1; 
-    #          elseif verbose && toc-t>10
-    #                display(sprintf('Iteration #i. --- #i corr',iter ,max(abs(DR)))) 
-    #                t=toc;
-    #          end
-    #     end
-    #     
-    #    # Also stop if residual gets too small or maxIter reached
-    #     if comp_err
-    #         if err_mse(iter)<1e-16
-    #             display('Stopping. Exact signal representation found!')
-    #             done=1;
-    #         end
-    #     else
-    #
-    #
-    #         if iter>1
-    #             if ERR<1e-16
-    #                 display('Stopping. Exact signal representation found!')
-    #                 done=1;
-    #             end
-    #         end
-    #     end
-    #
-    #     if iter >= MAXITER
-    #         display('Stopping. Maximum number of iterations reached!')
-    #         done = 1; 
-    #     end
-    #     
-    ############################################################################
-    ##                    If not done, take another round
-    ############################################################################
-    #   
-    #     if ~done
-    #        iter=iter+1;
-    #        oldERR=ERR;
-    #     end
-    #end
-    while not done:
-         
-         # Select new element
-         DR[IN]=0
-         #[v I]=max(abs(DR));
-         #v = np.abs(DR).max()
-         I = np.abs(DR).argmax()
-         #IN = np.concatenate((IN,I))
-         IN.append(I)
-         
-         
-         #k = k + 1  # moved to the end of the loop, since indexing is zero-based
-         
-         # Extract new element
-         mask = np.zeros(m)
-         mask[IN[k]] = 1
-         new_element = P(mask)
-    
-         # Orthogonalise new element 
-         if k-1 >= 0:
-             qP = np.dot(Q[:,0:k].T , new_element)
-             q = new_element - np.dot(Q[:,0:k] , qP)
-             
-             nq = np.linalg.norm(q)
-             q = q/nq
-             # Update QR factorisation 
-             R[0:k,k] = qP
-             R[k,k] = nq
-             Q[:,k] = q                
-         else:
-             q = new_element
-             
-             nq = np.linalg.norm(q)
-             q = q/nq
-             # Update QR factorisation 
-             R[k,k] = nq
-             Q[:,k] = q
-
-         #z[k]=np.vdot(q , x)
-         z.append(np.vdot(q , x))
-       
-         # New residual 
-         Residual = Residual - q * (z[k])
-         DR = Pt(Residual)
-         
-         ERR = np.vdot(Residual , Residual) / n
-         if comp_err:
-             err_mse[iter-1] = ERR
-         #end
-         
-         if comp_time:
-             iter_time[iter-1] = time.time() - tic
-         #end
-         
-         ###########################################################################
-         #                        Are we done yet?
-         ###########################################################################
-         if STOPCRIT == 'M':
-             if iter >= STOPTOL:
-                 done = 1
-             elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                #display(sprintf('Iteration #i. --- #i iterations to go',iter ,STOPTOL-iter)) 
-                print 'Iteration '+str(iter)+'. --- '+str(STOPTOL-iter)+' iterations to go'
-                t = time.time()
-             #end
-         elif STOPCRIT =='mse':
-             if comp_err:
-                if err_mse[iter-1] < STOPTOL:
-                    done = 1 
-                elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                    #display(sprintf('Iteration #i. --- #i mse',iter ,err_mse(iter))) 
-                    print 'Iteration '+str(iter)+'. --- '+str(err_mse[iter-1])+' mse'
-                    t = time.time()
-                #end
-             else:
-                 if ERR < STOPTOL:
-                    done = 1 
-                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                    #display(sprintf('Iteration #i. --- #i mse',iter ,ERR)) 
-                    print 'Iteration '+str(iter)+'. --- '+str(ERR)+' mse'
-                    t = time.time()
-                 #end
-             #end
-         elif STOPCRIT == 'mse_change' and iter >=2:
-             if comp_err and iter >=2:
-                 if ((err_mse[iter-2] - err_mse[iter-1])/sigsize < STOPTOL):
-                    done = 1
-                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                    #display(sprintf('Iteration #i. --- #i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
-                    print 'Iteration '+str(iter)+'. --- '+str((err_mse[iter-2]-err_mse[iter-1])/sigsize)+' mse change'
-                    t = time.time()
-                 #end
-             else:
-                 if ((oldERR - ERR)/sigsize < STOPTOL):
-                    done = 1
-                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                    #display(sprintf('Iteration #i. --- #i mse change',iter ,(oldERR - ERR)/sigsize)) 
-                    print 'Iteration '+str(iter)+'. --- '+str((oldERR - ERR)/sigsize)+' mse change'
-                    t = time.time()
-                 #end
-             #end
-         elif STOPCRIT == 'corr':
-              if np.abs(DR).max() < STOPTOL:
-                 done = 1 
-              elif verbose and time.time() - t > 10.0/1000: # time() returns sec
-                  #display(sprintf('Iteration #i. --- #i corr',iter ,max(abs(DR)))) 
-                  print 'Iteration '+str(iter)+'. --- '+str(np.abs(DR).max())+' corr'
-                  t = time.time()
-              #end
-          #end
-         
-         # Also stop if residual gets too small or maxIter reached
-         if comp_err:
-             if err_mse[iter-1] < 1e-14:
-                 done = 1
-                 # Nic: added verbose check
-                 if verbose:
-                     print 'Stopping. Exact signal representation found!'
-             #end
-         else:
-             if iter > 1:
-                 if ERR < 1e-14:
-                     done = 1 
-                     # Nic: added verbose check
-                     if verbose:
-                         print 'Stopping. Exact signal representation found!'
-                 #end
-             #end
-         #end
-         
-         
-         if iter >= MAXITER:
-             done = 1 
-             # Nic: added verbose check
-             if verbose:
-                 print 'Stopping. Maximum number of iterations reached!'
-         #end
-         
-         ###########################################################################
-         #                    If not done, take another round
-         ###########################################################################
-         if not done:
-            iter = iter + 1
-            oldERR = ERR
-         #end
-         
-         # Moved here from front, since we are 0-based         
-         k = k + 1
-    #end
-    
-    ###########################################################################
-    #            Now we can solve for s by back-substitution
-    ###########################################################################
-    #s(IN)=R(1:k,1:k)\z(1:k)';
-    s[IN] = scipy.linalg.solve(R[0:k,0:k] , np.array(z[0:k]))
-    
-    ###########################################################################
-    #                  Only return as many elements as iterations
-    ###########################################################################
-    if opts['nargout'] >= 2:
-        err_mse = err_mse[0:iter]
-    #end
-    if opts['nargout'] == 3:
-        iter_time = iter_time[0:iter]
-    #end
-    if verbose:
-       print 'Done'
-    #end
-    
-    # Return
-    if opts['nargout'] == 1:
-        return s
-    elif opts['nargout'] == 2:
-        return s, err_mse
-    elif opts['nargout'] == 3:
-        return s, err_mse, iter_time
-    
-    # Change history
-    #
-    # 8 February: The algorithm no longer stops if the dictionary is not normalised.
-
-    # End of greed_omp_qr() function
-    #--------------------------------
-    
-    
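A minimal, hypothetical usage sketch for greed_omp_qr as defined above; the sizes, the random unit-norm dictionary A and the signal are purely illustrative, and the option names mirror the opts handling in the function body (module name taken from the "from omp_QR import greed_omp_qr" used elsewhere in this changeset):

    import numpy as np
    from omp_QR import greed_omp_qr
    # toy problem: n measurements, m atoms, k-sparse coefficient vector (illustrative sizes)
    n, m, k = 64, 256, 8
    A = np.random.randn(n, m)
    A /= np.sqrt(np.sum(A ** 2, axis=0))            # unit-norm columns, as the function expects
    s_true = np.zeros(m)
    s_true[np.random.permutation(m)[:k]] = np.random.randn(k)
    x = np.dot(A, s_true)                           # measurement vector
    opts = {'stopCrit': 'M', 'stopTol': k, 'nargout': 3}
    s_est, err_mse, iter_time = greed_omp_qr(x, A, m, opts)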
-def omp_qr(x, dict, D, natom, tolerance):
-    """ Recover the sparse representation of x via a QR implementation of OMP
-
-   Parameters
-   ----------
-   x: measurement (column) vector
-   dict: dictionary; columns are assumed to have unit norm
-   D: Gramian of the dictionary (dict.T * dict)
-   natom: maximum number of atoms to select (iterations)
-   tolerance: relative residual-energy tolerance; iteration stops when
-              ||r||^2 <= tolerance * ||x||^2
-
-   Returns
-   -------
-   x_hat : sparse coefficient estimate (dict * x_hat approximates x)
-   gamma : indices of the selected (non-zero) atoms
-
-   For more information, see http://media.aau.dk/null_space_pursuits/2011/10/efficient-omp.html
-   """
-    msize, dictsize = dict.shape
-    normr2 = np.vdot(x,x)
-    normtol2 = tolerance*normr2
-    R = np.zeros((natom,natom))
-    Q = np.zeros((msize,natom))
-    gamma = []
-
-    # find initial projections
-    origprojections = np.dot(x.T,dict)
-    origprojectionsT = origprojections.T
-    projections = origprojections.copy();
-
-    k = 0
-    while (normr2 > normtol2) and (k < natom):
-      # find index of maximum magnitude projection
-      newgam = np.argmax(np.abs(projections ** 2))
-      gamma.append(newgam)
-      # update QR factorization, projections, and residual energy
-      if k == 0:
-        R[0,0] = 1
-        Q[:,0] = dict[:,newgam].copy()
-        # update projections
-        QtempQtempT = np.outer(Q[:,0],Q[:,0])
-        projections -= np.dot(x.T, np.dot(QtempQtempT,dict))
-        # update residual energy
-        normr2 -= np.vdot(x, np.dot(QtempQtempT,x))
-      else:
-        w = scipy.linalg.solve_triangular(R[0:k,0:k],D[gamma[0:k],newgam],trans=1)
-        R[k,k] = np.sqrt(1-np.vdot(w,w))
-        R[0:k,k] = w.copy()
-        Q[:,k] = (dict[:,newgam] - np.dot(QtempQtempT,dict[:,newgam]))/R[k,k]
-        QkQkT = np.outer(Q[:,k],Q[:,k])
-        xTQkQkT = np.dot(x.T,QkQkT)
-        QtempQtempT += QkQkT
-        # update projections
-        projections -= np.dot(xTQkQkT,dict)
-        # update residual energy
-        normr2 -= np.dot(xTQkQkT,x)
-
-      k += 1
-
-    # build solution
-    tempR = R[0:k,0:k]
-    w = scipy.linalg.solve_triangular(tempR,origprojectionsT[gamma[0:k]],trans=1)
-    x_hat = np.zeros((dictsize,1))
-    x_hat[gamma[0:k]] = scipy.linalg.solve_triangular(tempR,w)
-
-    return x_hat, gamma
\ No newline at end of file
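A similarly illustrative sketch for omp_qr above; the toy data is hypothetical, and the third argument is the precomputed Gramian, matching the DTD that the callers in omp_app.py pass in:

    import numpy as np
    from omp_QR import omp_qr
    n, m, k = 64, 256, 8
    D = np.random.randn(n, m)
    D /= np.sqrt(np.sum(D ** 2, axis=0))            # unit-norm columns
    x0 = np.zeros((m, 1))
    x0[np.random.permutation(m)[:k], 0] = np.random.randn(k)
    y = np.dot(D, x0)                               # (n, 1) measurement column
    G = np.dot(D.T, D)                              # precomputed Gramian
    x_hat, support = omp_qr(y, D, G, k, 1e-10)      # at most k atoms, or 1e-10 relative residual energy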
--- a/OMP/omp_app.py	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-""" 
-#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
-# Bob L. Sturm <bst@create.aau.dk> 20111018
-# Department of Architecture, Design and Media Technology
-# Aalborg University Copenhagen
-# Lautrupvang 15, 2750 Ballerup, Denmark
-#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
-"""
-
-import numpy as np
-from sklearn.utils import check_random_state
-import time
-
-from omp_sk_bugfix import orthogonal_mp
-from omp_QR import greed_omp_qr
-from omp_QR import omp_qr
-
-"""
-Run a problem suite involving sparse vectors in 
-ambientDimension dimensional space, with a resolution
-in the phase plane of numGradations x numGradations,
-and at each indeterminacy and sparsity pair run
-numTrials independent trials.
-
-Outputs a text file denoting successes at each phase point.
-For more on phase transitions, see:
-D. L. Donoho and J. Tanner, "Precise undersampling theorems," 
-Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010.
-"""
-
-def runProblemSuite(ambientDimension,numGradations,numTrials):
-
-  idx = np.arange(ambientDimension)
-  phaseDelta = np.linspace(0.05,1,numGradations)
-  phaseRho = np.linspace(0.05,1,numGradations)
-  success = np.zeros((numGradations, numGradations))
-  
-  #Nic: init timers
-  t1all = 0
-  t2all = 0
-  t3all = 0
-  
-  deltaCounter = 0
-  # delta is the undersampling ratio: numMeasurements / ambientDimension
-  for delta in phaseDelta[:17]:
-    rhoCounter = 0
-    for rho in phaseRho:
-      print(deltaCounter,rhoCounter)
-      numMeasurements = int(delta*ambientDimension)
-      sparsity = int(rho*numMeasurements)
-      # how do I set the following to be random each time?
-      generator = check_random_state(100)
-      # create unit norm dictionary
-      D = generator.randn(numMeasurements, ambientDimension)
-      D /= np.sqrt(np.sum((D ** 2), axis=0))
-      # compute Gramian (for efficiency)
-      DTD = np.dot(D.T,D)
-  
-      successCounter = 0
-      trial = numTrials
-      while trial > 0:
-        # generate sparse signal with a minimum non-zero value
-        x = np.zeros((ambientDimension, 1))
-        idx2 = idx
-        generator.shuffle(idx2)
-        idx3 = idx2[:sparsity]
-        while np.min(np.abs(x[idx3,0])) < 1e-10 :
-           x[idx3,0] = generator.randn(sparsity)
-        # sense sparse signal
-        y = np.dot(D, x)
-        
-        # Nic: Use sparsify OMP function (translated from Matlab)
-        ompopts = dict({'stopCrit':'M', 'stopTol':2*sparsity})
-        starttime = time.time()                     # start timer
-        x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts)
-        t2all = t2all + time.time() - starttime     # stop timer
-        idx_r2 = np.nonzero(x_r2)[0]        
-        
-        # run to two times expected sparsity, or tolerance
-        # why? Oftentimes OMP can retrieve the correct solution
-        # when it is run for more than the expected sparsity
-        #x_r, idx_r = omp_qr(y,D,DTD,2*sparsity,1e-5)
-        # Nic: adjust tolerance to match with other function
-        starttime = time.time()                     # start timer
-        x_r, idx_r = omp_qr(y.copy(),D.copy(),DTD.copy(),2*sparsity,numMeasurements*1e-14/np.vdot(y,y))
-        t1all = t1all + time.time() - starttime     # stop timer        
-        
-        # Nic: test sklearn omp
-        starttime = time.time()                     # start timer
-        x_r3 = orthogonal_mp(D.copy(), y.copy(), n_nonzero_coefs=2*sparsity, tol=numMeasurements*1e-14, precompute_gram=False, copy_X=True)
-        idx_r3 = np.nonzero(x_r3)[0]
-        t3all = t3all + time.time() - starttime     # stop timer        
-        
-        # Nic: compare results
-        print 'diff1 = ',np.linalg.norm(x_r.squeeze() - x_r2.squeeze())
-        print 'diff2 = ',np.linalg.norm(x_r.squeeze() - x_r3.squeeze())
-        print 'diff3 = ',np.linalg.norm(x_r2.squeeze() - x_r3.squeeze())
-        print "Bob's total time = ", t1all
-        print "Nic's total time = ", t2all
-        print "Skl's total time = ", t3all
-        if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-10 or \
-           np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-10 or \
-           np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-10:
-            print "STOP: Different results"
-            print "Bob's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r).squeeze())
-            print "Nic's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r2).squeeze())
-            print "Skl's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r3).squeeze())
-            raise ValueError("Different results")
-  
-        # debias: remove small entries
-        for nn in idx_r:
-          if abs(x_r[nn]) < 1e-10:
-            x_r[nn] = 0
-  
-        # exact recovery condition using support
-        #if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)):
-        #  successCounter += 1
-        # exact recovery condition using error in solution
-        error = x - x_r
-        """ the following is the exact recovery condition in: A. Maleki 
-              and D. L. Donoho, "Optimally tuned iterative reconstruction 
-              algorithms for compressed sensing," IEEE J. Selected Topics 
-              in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """
-        if np.vdot(error,error) < np.vdot(x,x)*1e-4:
-          successCounter += 1
-        trial -= 1
-  
-      success[rhoCounter,deltaCounter] = successCounter
-      if successCounter == 0:
-        break
-  
-      rhoCounter += 1
-      #np.savetxt('test.txt',success,fmt='#2.1d',delimiter=',')
-    deltaCounter += 1
-
-if __name__ == '__main__':
-  print ('Running problem suite')
-  ambientDimension = 400
-  numGradations = 30
-  numTrials = 1
-  
-  #import cProfile
-  #cProfile.run('runProblemSuite(ambientDimension,numGradations,numTrials)','profres')  
-  runProblemSuite(ambientDimension,numGradations,numTrials) 
-  print "Done"
-  
-  #import pstats
-  #p = pstats.Stats('D:\Nic\Dev2\profres')
-  #p.sort_stats('cumulative').print_stats(10)
--- a/OMP/omp_sk_bugfix.py	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,564 +0,0 @@
-"""Orthogonal matching pursuit algorithms
-"""
-
-# Author: Vlad Niculae
-#
-# License: BSD Style.
-
-from warnings import warn
-
-import numpy as np
-from scipy import linalg
-from scipy.linalg.lapack import get_lapack_funcs
-
-#from .base import LinearModel
-#from ..utils.arrayfuncs import solve_triangular
-from sklearn.linear_model.base import LinearModel
-from sklearn.utils.arrayfuncs import solve_triangular
-
-premature = """ Orthogonal matching pursuit ended prematurely due to linear
-dependence in the dictionary. The requested precision might not have been met.
-"""
-
-
-def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True):
-    """Orthogonal Matching Pursuit step using the Cholesky decomposition.
-
-Parameters:
------------
-X: array, shape = (n_samples, n_features)
-Input dictionary. Columns are assumed to have unit norm.
-
-y: array, shape = (n_samples,)
-Input targets
-
-n_nonzero_coefs: int
-Targeted number of non-zero elements
-
-tol: float
-Targeted squared error. In this modified version it acts jointly with
-n_nonzero_coefs instead of overriding it (see the Nic comments in the code).
-
-copy_X: bool, optional
-Whether the design matrix X must be copied by the algorithm. A false
-value is only helpful if X is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-Returns:
---------
-gamma: array, shape = (n_nonzero_coefs,)
-Non-zero elements of the solution
-
-idx: array, shape = (n_nonzero_coefs,)
-Indices of the positions of the elements in gamma within the solution
-vector
-
-"""
-    if copy_X:
-        X = X.copy('F')
-    else: # even if we are allowed to overwrite, still copy it if bad order
-        X = np.asfortranarray(X)
-
-    min_float = np.finfo(X.dtype).eps
-    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
-    potrs, = get_lapack_funcs(('potrs',), (X,))
-
-    alpha = np.dot(X.T, y)
-    residual = y
-    n_active = 0
-    indices = range(X.shape[1]) # keeping track of swapping
-
-    #max_features = X.shape[1] if tol is not None else n_nonzero_coefs
-    # Nic: tol not None should not override n_nonzero_coefs, but act together with it
-    max_features = n_nonzero_coefs
-    
-    L = np.empty((max_features, max_features), dtype=X.dtype)
-    L[0, 0] = 1.
-
-    while True:
-        lam = np.argmax(np.abs(np.dot(X.T, residual)))
-        if lam < n_active or alpha[lam] ** 2 < min_float:
-            # atom already selected or inner product too small
-            warn(premature)
-            break
-        if n_active > 0:
-            # Updates the Cholesky decomposition of X' X
-            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
-            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
-            v = nrm2(L[n_active, :n_active]) ** 2
-            if 1 - v <= min_float: # selected atoms are dependent
-                warn(premature)
-                break
-            L[n_active, n_active] = np.sqrt(1 - v)
-        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
-        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
-        indices[n_active], indices[lam] = indices[lam], indices[n_active]
-        n_active += 1
-        # solves LL'x = y as a composition of two triangular systems
-        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
-                         overwrite_b=False)
-
-        residual = y - np.dot(X[:, :n_active], gamma)
-        if tol is not None and nrm2(residual) ** 2 <= tol:
-            break
-        #elif n_active == max_features:
-        # Nic: tol not None should not override n_nonzero_coefs, but act together with it
-        if n_active == max_features:
-            break
-
-    return gamma, indices[:n_active]
-
-
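A rough sketch of how the Cholesky step above can be exercised on its own (toy data with unit-norm columns, as the docstring assumes; all names are illustrative and the private function is imported only for demonstration):

    import numpy as np
    from omp_sk_bugfix import _cholesky_omp
    n, m, k = 50, 120, 5
    X = np.random.randn(n, m)
    X /= np.sqrt(np.sum(X ** 2, axis=0))            # unit-norm columns
    w = np.zeros(m)
    w[np.random.permutation(m)[:k]] = np.random.randn(k)
    y = np.dot(X, w)
    gamma, support = _cholesky_omp(X, y, n_nonzero_coefs=k)
    w_rec = np.zeros(m)
    w_rec[support] = gamma                          # scatter the solution back onto the selected atoms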
-def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
-              copy_Gram=True, copy_Xy=True):
-    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
-
-This function uses the Cholesky decomposition method.
-
-Parameters:
------------
-Gram: array, shape = (n_features, n_features)
-Gram matrix of the input data matrix
-
-Xy: array, shape = (n_features,)
-Input targets
-
-n_nonzero_coefs: int
-Targeted number of non-zero elements
-
-tol_0: float
-Squared norm of y, required if tol is not None.
-
-tol: float
-Targeted squared error, if not None overrides n_nonzero_coefs.
-
-copy_Gram: bool, optional
-Whether the gram matrix must be copied by the algorithm. A false
-value is only helpful if it is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-copy_Xy: bool, optional
-Whether the covariance vector Xy must be copied by the algorithm.
-If False, it may be overwritten.
-
-Returns:
---------
-gamma: array, shape = (n_nonzero_coefs,)
-Non-zero elements of the solution
-
-idx: array, shape = (n_nonzero_coefs,)
-Indices of the positions of the elements in gamma within the solution
-vector
-
-"""
-    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
-
-    if copy_Xy:
-        Xy = Xy.copy()
-
-    min_float = np.finfo(Gram.dtype).eps
-    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
-    potrs, = get_lapack_funcs(('potrs',), (Gram,))
-
-    indices = range(len(Gram)) # keeping track of swapping
-    alpha = Xy
-    tol_curr = tol_0
-    delta = 0
-    n_active = 0
-
-    max_features = len(Gram) if tol is not None else n_nonzero_coefs
-    L = np.empty((max_features, max_features), dtype=Gram.dtype)
-    L[0, 0] = 1.
-
-    while True:
-        lam = np.argmax(np.abs(alpha))
-        if lam < n_active or alpha[lam] ** 2 < min_float:
-            # selected same atom twice, or inner product too small
-            warn(premature)
-            break
-        if n_active > 0:
-            L[n_active, :n_active] = Gram[lam, :n_active]
-            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
-            v = nrm2(L[n_active, :n_active]) ** 2
-            if 1 - v <= min_float: # selected atoms are dependent
-                warn(premature)
-                break
-            L[n_active, n_active] = np.sqrt(1 - v)
-        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
-        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
-        indices[n_active], indices[lam] = indices[lam], indices[n_active]
-        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
-        n_active += 1
-        # solves LL'x = y as a composition of two triangular systems
-        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
-                         overwrite_b=False)
-
-        beta = np.dot(Gram[:, :n_active], gamma)
-        alpha = Xy - beta
-        if tol is not None:
-            tol_curr += delta
-            delta = np.inner(gamma, beta[:n_active])
-            tol_curr -= delta
-            if tol_curr <= tol:
-                break
-        elif n_active == max_features:
-            break
-
-    return gamma, indices[:n_active]
-
-
-def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute_gram=False,
-                  copy_X=True):
-    """Orthogonal Matching Pursuit (OMP)
-
-Solves n_targets Orthogonal Matching Pursuit problems.
-An instance of the problem has the form:
-
-When parametrized by the number of non-zero coefficients using
-`n_nonzero_coefs`:
-argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
-
-When parametrized by error using the parameter `tol`:
-argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
-
-Parameters
-----------
-X: array, shape = (n_samples, n_features)
-Input data. Columns are assumed to have unit norm.
-
-y: array, shape = (n_samples,) or (n_samples, n_targets)
-Input targets
-
-n_nonzero_coefs: int
-Desired number of non-zero entries in the solution. If None (by
-default) this value is set to 10% of n_features.
-
-tol: float
-Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
-
-Nic: tol does not override n_nonzero_coefs; the two conditions act jointly!
-
-precompute_gram: {True, False, 'auto'},
-Whether to perform precomputations. Improves performance when n_targets
-or n_samples is very large.
-
-copy_X: bool, optional
-Whether the design matrix X must be copied by the algorithm. A false
-value is only helpful if X is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-Returns
--------
-coef: array, shape = (n_features,) or (n_features, n_targets)
-Coefficients of the OMP solution
-
-See also
---------
-OrthogonalMatchingPursuit
-orthogonal_mp_gram
-lars_path
-decomposition.sparse_encode
-decomposition.sparse_encode_parallel
-
-Notes
------
-Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
-Matching pursuits with time-frequency dictionaries, IEEE Transactions on
-Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
-(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
-
-This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
-M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
-Matching Pursuit Technical Report - CS Technion, April 2008.
-http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
-
-"""
-    X, y = map(np.asanyarray, (X, y))
-    if y.ndim == 1:
-        y = y[:, np.newaxis]
-    if copy_X:
-        X = X.copy('F')
-        copy_X = False
-    else:
-        X = np.asfortranarray(X)
-    if y.shape[1] > 1: # subsequent targets will be affected
-        copy_X = True
-    if n_nonzero_coefs == None and tol == None:
-        n_nonzero_coefs = int(0.1 * X.shape[1])
-    if tol is not None and tol < 0:
-        raise ValueError("Epsilon cannot be negative")
-    if tol is None and n_nonzero_coefs <= 0:
-        raise ValueError("The number of atoms must be positive")
-    if tol is None and n_nonzero_coefs > X.shape[1]:
-        raise ValueError("The number of atoms cannot be more than the number \
-of features")
-    if precompute_gram == 'auto':
-        precompute_gram = X.shape[0] > X.shape[1]
-    if precompute_gram:
-        G = np.dot(X.T, X)
-        G = np.asfortranarray(G)
-        Xy = np.dot(X.T, y)
-        if tol is not None:
-            norms_squared = np.sum((y ** 2), axis=0)
-        else:
-            norms_squared = None
-        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
-                                  copy_Gram=copy_X, copy_Xy=False)
-
-    coef = np.zeros((X.shape[1], y.shape[1]))
-    for k in xrange(y.shape[1]):
-        x, idx = _cholesky_omp(X, y[:, k], n_nonzero_coefs, tol,
-                               copy_X=copy_X)
-        coef[idx, k] = x
-    return np.squeeze(coef)
-
-
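A minimal sketch of the public entry point defined above, with hypothetical toy data (the module name follows the "from omp_sk_bugfix import orthogonal_mp" used in omp_app.py):

    import numpy as np
    from omp_sk_bugfix import orthogonal_mp
    n, m, k = 100, 300, 10
    X = np.random.randn(n, m)
    X /= np.sqrt(np.sum(X ** 2, axis=0))            # unit-norm columns
    w = np.zeros(m)
    w[np.random.permutation(m)[:k]] = np.random.randn(k)
    y = np.dot(X, w)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=k)   # dense (m,) vector with up to k non-zeros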
-def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
-                       norms_squared=None, copy_Gram=True,
-                       copy_Xy=True):
-    """Gram Orthogonal Matching Pursuit (OMP)
-
-Solves n_targets Orthogonal Matching Pursuit problems using only
-the Gram matrix X.T * X and the product X.T * y.
-
-Parameters
-----------
-Gram: array, shape = (n_features, n_features)
-Gram matrix of the input data: X.T * X
-
-Xy: array, shape = (n_features,) or (n_features, n_targets)
-Input targets multiplied by X: X.T * y
-
-n_nonzero_coefs: int
-Desired number of non-zero entries in the solution. If None (by
-default) this value is set to 10% of n_features.
-
-tol: float
-Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
-
-norms_squared: array-like, shape = (n_targets,)
-Squared L2 norms of the lines of y. Required if tol is not None.
-
-copy_Gram: bool, optional
-Whether the gram matrix must be copied by the algorithm. A false
-value is only helpful if it is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-copy_Xy: bool, optional
-Whether the covariance vector Xy must be copied by the algorithm.
-If False, it may be overwritten.
-
-Returns
--------
-coef: array, shape = (n_features,) or (n_features, n_targets)
-Coefficients of the OMP solution
-
-See also
---------
-OrthogonalMatchingPursuit
-orthogonal_mp
-lars_path
-decomposition.sparse_encode
-decomposition.sparse_encode_parallel
-
-Notes
------
-Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
-Matching pursuits with time-frequency dictionaries, IEEE Transactions on
-Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
-(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
-
-This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
-M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
-Matching Pursuit Technical Report - CS Technion, April 2008.
-http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
-
-"""
-    Gram, Xy = map(np.asanyarray, (Gram, Xy))
-    if Xy.ndim == 1:
-        Xy = Xy[:, np.newaxis]
-        if tol is not None:
-            norms_squared = [norms_squared]
-
-    if n_nonzero_coefs == None and tol is None:
-        n_nonzero_coefs = int(0.1 * len(Gram))
-    if tol is not None and norms_squared == None:
-        raise ValueError('Gram OMP needs the precomputed norms in order \
-to evaluate the error sum of squares.')
-    if tol is not None and tol < 0:
-        raise ValueError("Epsilon cannot be negative")
-    if tol is None and n_nonzero_coefs <= 0:
-        raise ValueError("The number of atoms must be positive")
-    if tol is None and n_nonzero_coefs > len(Gram):
-        raise ValueError("The number of atoms cannot be more than the number \
-of features")
-    coef = np.zeros((len(Gram), Xy.shape[1]))
-    for k in range(Xy.shape[1]):
-        x, idx = _gram_omp(Gram, Xy[:, k], n_nonzero_coefs,
-                           norms_squared[k] if tol is not None else None, tol,
-                           copy_Gram=copy_Gram, copy_Xy=copy_Xy)
-        coef[idx, k] = x
-    return np.squeeze(coef)
-
-
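The Gram variant above takes the precomputed X.T * X and X.T * y instead of X and y; a short, illustrative sketch under the same kind of toy setup:

    import numpy as np
    from omp_sk_bugfix import orthogonal_mp_gram
    n, m, k = 100, 300, 10
    X = np.random.randn(n, m)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    y = np.dot(X[:, :k], np.random.randn(k))        # k-sparse signal supported on the first k atoms
    coef = orthogonal_mp_gram(np.dot(X.T, X), np.dot(X.T, y), n_nonzero_coefs=k)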
-class OrthogonalMatchingPursuit(LinearModel):
-    """Orthogonal Matching Pursuit model (OMP)
-
-Parameters
-----------
-n_nonzero_coefs: int, optional
-Desired number of non-zero entries in the solution. If None (by
-default) this value is set to 10% of n_features.
-
-tol: float, optional
-Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
-
-fit_intercept: boolean, optional
-whether to calculate the intercept for this model. If set
-to false, no intercept will be used in calculations
-(e.g. data is expected to be already centered).
-
-normalize: boolean, optional
-If False, the regressors X are assumed to be already normalized.
-
-precompute_gram: {True, False, 'auto'},
-Whether to use a precomputed Gram and Xy matrix to speed up
-calculations. Improves performance when `n_targets` or `n_samples` is
-very large. Note that if you already have such matrices, you can pass
-them directly to the fit method.
-
-copy_X: bool, optional
-Whether the design matrix X must be copied by the algorithm. A false
-value is only helpful if X is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-copy_Gram: bool, optional
-Whether the gram matrix must be copied by the algorithm. A false
-value is only helpful if X is already Fortran-ordered, otherwise a
-copy is made anyway.
-
-copy_Xy: bool, optional
-Whether the covariance vector Xy must be copied by the algorithm.
-If False, it may be overwritten.
-
-
-Attributes
-----------
-coef_: array, shape = (n_features,) or (n_features, n_targets)
-parameter vector (w in the formula)
-
-intercept_: float or array, shape =(n_targets,)
-independent term in decision function.
-
-Notes
------
-Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
-Matching pursuits with time-frequency dictionaries, IEEE Transactions on
-Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
-(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
-
-This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
-M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
-Matching Pursuit Technical Report - CS Technion, April 2008.
-http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
-
-See also
---------
-orthogonal_mp
-orthogonal_mp_gram
-lars_path
-Lars
-LassoLars
-decomposition.sparse_encode
-decomposition.sparse_encode_parallel
-
-"""
-    def __init__(self, copy_X=True, copy_Gram=True,
-            copy_Xy=True, n_nonzero_coefs=None, tol=None,
-            fit_intercept=True, normalize=True, precompute_gram=False):
-        self.n_nonzero_coefs = n_nonzero_coefs
-        self.tol = tol
-        self.fit_intercept = fit_intercept
-        self.normalize = normalize
-        self.precompute_gram = precompute_gram
-        self.copy_Gram = copy_Gram
-        self.copy_Xy = copy_Xy
-        self.copy_X = copy_X
-
-    def fit(self, X, y, Gram=None, Xy=None):
-        """Fit the model using X, y as training data.
-
-Parameters
-----------
-X: array-like, shape = (n_samples, n_features)
-Training data.
-
-y: array-like, shape = (n_samples,) or (n_samples, n_targets)
-Target values.
-
-Gram: array-like, shape = (n_features, n_features) (optional)
-Gram matrix of the input data: X.T * X
-
-Xy: array-like, shape = (n_features,) or (n_features, n_targets)
-(optional)
-Input targets multiplied by X: X.T * y
-
-
-Returns
--------
-self: object
-returns an instance of self.
-"""
-        X = np.atleast_2d(X)
-        y = np.atleast_1d(y)
-        n_features = X.shape[1]
-
-        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
-                                                        self.fit_intercept,
-                                                        self.normalize,
-                                                        self.copy_X)
-
-        if self.n_nonzero_coefs == None and self.tol is None:
-            self.n_nonzero_coefs = int(0.1 * n_features)
-
-        if Gram is not None:
-            Gram = np.atleast_2d(Gram)
-
-            if self.copy_Gram:
-                copy_Gram = False
-                Gram = Gram.copy('F')
-            else:
-                Gram = np.asfortranarray(Gram)
-
-            copy_Gram = self.copy_Gram
-            if y.shape[1] > 1: # subsequent targets will be affected
-                copy_Gram = True
-
-            if Xy is None:
-                Xy = np.dot(X.T, y)
-            else:
-                if self.copy_Xy:
-                    Xy = Xy.copy()
-                if self.normalize:
-                    if len(Xy.shape) == 1:
-                        Xy /= X_std
-                    else:
-                        Xy /= X_std[:, np.newaxis]
-
-            if self.normalize:
-                Gram /= X_std
-                Gram /= X_std[:, np.newaxis]
-
-            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
-            self.coef_ = orthogonal_mp_gram(Gram, Xy, self.n_nonzero_coefs,
-                                            self.tol, norms_sq,
-                                            copy_Gram, True).T
-        else:
-            precompute_gram = self.precompute_gram
-            if precompute_gram == 'auto':
-                precompute_gram = X.shape[0] > X.shape[1]
-            self.coef_ = orthogonal_mp(X, y, self.n_nonzero_coefs, self.tol,
-                                       precompute_gram=self.precompute_gram,
-                                       copy_X=self.copy_X).T
-
-        self._set_intercept(X_mean, y_mean, X_std)
-        return self
\ No newline at end of file
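A hypothetical end-to-end use of the estimator class above (toy data; intercept fitting and normalisation are switched off so the dictionary columns keep their unit norm, and the class relies on the LinearModel base of the sklearn version this file targets):

    import numpy as np
    from omp_sk_bugfix import OrthogonalMatchingPursuit
    n, m, k = 100, 300, 10
    X = np.random.randn(n, m)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    y = np.dot(X[:, :k], np.random.randn(k))
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=k, fit_intercept=False, normalize=False)
    model.fit(X, y)
    # model.coef_ holds the recovered sparse coefficients, model.intercept_ the offset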
--- a/OMP/omp_test.py	Thu Oct 20 21:06:06 2011 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-""" 
-#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
-# Bob L. Sturm <bst@create.aau.dk> 20111018
-# Department of Architecture, Design and Media Technology
-# Aalborg University Copenhagen
-# Lautrupvang 15, 2750 Ballerup, Denmark
-#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
-"""
-
-import unittest
-
-import numpy as np
-from sklearn.utils import check_random_state
-import time
-
-from omp_sk_bugfix import orthogonal_mp
-from omp_QR import greed_omp_qr
-from omp_QR import omp_qr
-
-"""
-Run a problem suite involving sparse vectors in 
-ambientDimension dimensional space, with a resolution
-in the phase plane of numGradations x numGradations,
-and at each indeterminacy and sparsity pair run
-numTrials independent trials.
-
-Outputs a text file denoting successes at each phase point.
-For more on phase transitions, see:
-D. L. Donoho and J. Tanner, "Precise undersampling theorems," 
-Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010.
-"""
-
-class CompareResults(unittest.TestCase):
-    
-    def testCompareResults(self):
-      """OMP results should be almost the same with all implementations"""
-      ambientDimension = 400
-      numGradations = 30
-      numTrials = 1
-      runProblemSuite(ambientDimension,numGradations,numTrials, verbose=False) 
-
-
-
-def runProblemSuite(ambientDimension,numGradations,numTrials, verbose):
-
-  idx = np.arange(ambientDimension)
-  phaseDelta = np.linspace(0.05,1,numGradations)
-  phaseRho = np.linspace(0.05,1,numGradations)
-  success = np.zeros((numGradations, numGradations))
-  
-  #Nic: init timers
-  t1all = 0
-  t2all = 0
-  t3all = 0
-  
-  deltaCounter = 0
-  # delta is the undersampling ratio: numMeasurements / ambientDimension
-  for delta in phaseDelta[:17]:
-    rhoCounter = 0
-    for rho in phaseRho:
-      if verbose:
-          print(deltaCounter,rhoCounter)
-          
-      numMeasurements = int(delta*ambientDimension)
-      sparsity = int(rho*numMeasurements)
-      # how do I set the following to be random each time?
-      generator = check_random_state(100)
-      # create unit norm dictionary
-      D = generator.randn(numMeasurements, ambientDimension)
-      D /= np.sqrt(np.sum((D ** 2), axis=0))
-      # compute Gramian (for efficiency)
-      DTD = np.dot(D.T,D)
-  
-      successCounter = 0
-      trial = numTrials
-      while trial > 0:
-        # generate sparse signal with a minimum non-zero value
-        x = np.zeros((ambientDimension, 1))
-        idx2 = idx
-        generator.shuffle(idx2)
-        idx3 = idx2[:sparsity]
-        while np.min(np.abs(x[idx3,0])) < 1e-10 :
-           x[idx3,0] = generator.randn(sparsity)
-        # sense sparse signal
-        y = np.dot(D, x)
-        
-        # Nic: Use sparsify OMP function (translated from Matlab)
-        ompopts = dict({'stopCrit':'M', 'stopTol':2*sparsity})
-        starttime = time.time()                     # start timer
-        x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts)
-        t2all = t2all + time.time() - starttime     # stop timer
-        idx_r2 = np.nonzero(x_r2)[0]        
-        
-        # run to two times expected sparsity, or tolerance
-        # why? Oftentimes OMP can retrieve the correct solution
-        # when it is run for more than the expected sparsity
-        #x_r, idx_r = omp_qr(y,D,DTD,2*sparsity,1e-5)
-        # Nic: adjust tolerance to match with other function
-        starttime = time.time()                     # start timer
-        x_r, idx_r = omp_qr(y.copy(),D.copy(),DTD.copy(),2*sparsity,numMeasurements*1e-14/np.vdot(y,y))
-        t1all = t1all + time.time() - starttime     # stop timer        
-        
-        # Nic: test sklearn omp
-        starttime = time.time()                     # start timer
-        x_r3 = orthogonal_mp(D.copy(), y.copy(), 2*sparsity, tol=numMeasurements*1e-14, precompute_gram=False, copy_X=True)
-        idx_r3 = np.nonzero(x_r3)[0]
-        t3all = t3all + time.time() - starttime     # stop timer        
-        
-        # Nic: compare results
-        if verbose:
-            print 'diff1 = ',np.linalg.norm(x_r.squeeze() - x_r2.squeeze())
-            print 'diff2 = ',np.linalg.norm(x_r.squeeze() - x_r3.squeeze())
-            print 'diff3 = ',np.linalg.norm(x_r2.squeeze() - x_r3.squeeze())
-            print "Bob's total time = ", t1all
-            print "Nic's total time = ", t2all
-            print "Skl's total time = ", t3all
-        if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-6 or \
-           np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-6 or \
-           np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-6:
-               if verbose:
-                   print "STOP: Different results"
-                   print "Bob's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r).squeeze())
-                   print "Nic's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r2).squeeze())
-                   print "Skl's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r3).squeeze())
-               raise ValueError("Different results")
-  
-        # debias: remove small entries
-        for nn in idx_r:
-          if abs(x_r[nn]) < 1e-10:
-            x_r[nn] = 0
-  
-        # exact recovery condition using support
-        #if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)):
-        #  successCounter += 1
-        # exact recovery condition using error in solution
-        error = x - x_r
-        """ the following is the exact recovery condition in: A. Maleki 
-              and D. L. Donoho, "Optimally tuned iterative reconstruction 
-              algorithms for compressed sensing," IEEE J. Selected Topics 
-              in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """
-        if np.vdot(error,error) < np.vdot(x,x)*1e-4:
-          successCounter += 1
-        trial -= 1
-  
-      success[rhoCounter,deltaCounter] = successCounter
-      if successCounter == 0:
-        break
-  
-      rhoCounter += 1
-      #np.savetxt('test.txt',success,fmt='#2.1d',delimiter=',')
-    deltaCounter += 1
-    
-if __name__ == "__main__":
-    
-    unittest.main(verbosity=2)    
-    #suite = unittest.TestLoader().loadTestsFromTestCase(CompareResults)
-    #unittest.TextTestRunner(verbosity=2).run(suite)    
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/apps/omp_app.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,149 @@
+""" 
+#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
+# Bob L. Sturm <bst@create.aau.dk> 20111018
+# Department of Architecture, Design and Media Technology
+# Aalborg University Copenhagen
+# Lautrupvang 15, 2750 Ballerup, Denmark
+#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
+"""
+
+import numpy as np
+from sklearn.utils import check_random_state
+import time
+
+from omp_sk_bugfix import orthogonal_mp
+from omp_QR import greed_omp_qr
+from omp_QR import omp_qr
+
+"""
+Run a problem suite involving sparse vectors in 
+ambientDimension dimensional space, with a resolution
+in the phase plane of numGradations x numGradations,
+and at each indeterminacy and sparsity pair run
+numTrials independent trials.
+
+Outputs a text file denoting successes at each phase point.
+For more on phase transitions, see:
+D. L. Donoho and J. Tanner, "Precise undersampling theorems," 
+Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010.
+"""
+
+def runProblemSuite(ambientDimension,numGradations,numTrials):
+
+  idx = np.arange(ambientDimension)
+  phaseDelta = np.linspace(0.05,1,numGradations)
+  phaseRho = np.linspace(0.05,1,numGradations)
+  success = np.zeros((numGradations, numGradations))
+  
+  #Nic: init timers
+  t1all = 0
+  t2all = 0
+  t3all = 0
+  
+  deltaCounter = 0
+  # delta is the undersampling ratio: number of measurements / ambient dimension
+  for delta in phaseDelta[:17]:
+    rhoCounter = 0
+    for rho in phaseRho:
+      print(deltaCounter,rhoCounter)
+      numMeasurements = int(delta*ambientDimension)
+      sparsity = int(rho*numMeasurements)
+      # how do I set the following to be random each time?
+      generator = check_random_state(100)
+      # create unit norm dictionary
+      D = generator.randn(numMeasurements, ambientDimension)
+      D /= np.sqrt(np.sum((D ** 2), axis=0))
+      # compute Gramian (for efficiency)
+      DTD = np.dot(D.T,D)
+  
+      successCounter = 0
+      trial = numTrials
+      while trial > 0:
+        # generate sparse signal with a minimum non-zero value
+        x = np.zeros((ambientDimension, 1))
+        idx2 = idx
+        generator.shuffle(idx2)
+        idx3 = idx2[:sparsity]
+        while np.min(np.abs(x[idx3,0])) < 1e-10 :
+           x[idx3,0] = generator.randn(sparsity)
+        # sense sparse signal
+        y = np.dot(D, x)
+        
+        # Nic: Use sparsify OMP function (translated from Matlab)
+        ompopts = dict({'stopCrit':'M', 'stopTol':2*sparsity})
+        starttime = time.time()                     # start timer
+        x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts)
+        t2all = t2all + time.time() - starttime     # stop timer
+        idx_r2 = np.nonzero(x_r2)[0]        
+        
+        # run to two times the expected sparsity, or until the tolerance is met
+        # why? Oftentimes OMP can still recover the correct solution
+        # when it is run for more iterations than the expected sparsity
+        #x_r, idx_r = omp_qr(y,D,DTD,2*sparsity,1e-5)
+        # Nic: adjust tolerance to match with other function
+        starttime = time.time()                     # start timer
+        x_r, idx_r = omp_qr(y.copy(),D.copy(),DTD.copy(),2*sparsity,numMeasurements*1e-14/np.vdot(y,y))
+        t1all = t1all + time.time() - starttime     # stop timer        
+        
+        # Nic: test sklearn omp
+        starttime = time.time()                     # start timer
+        x_r3 = orthogonal_mp(D.copy(), y.copy(), n_nonzero_coefs=2*sparsity, tol=numMeasurements*1e-14, precompute_gram=False, copy_X=True)
+        idx_r3 = np.nonzero(x_r3)[0]
+        t3all = t3all + time.time() - starttime     # stop timer        
+        
+        # Nic: compare results
+        print 'diff1 = ',np.linalg.norm(x_r.squeeze() - x_r2.squeeze())
+        print 'diff2 = ',np.linalg.norm(x_r.squeeze() - x_r3.squeeze())
+        print 'diff3 = ',np.linalg.norm(x_r2.squeeze() - x_r3.squeeze())
+        print "Bob's total time = ", t1all
+        print "Nic's total time = ", t2all
+        print "Skl's total time = ", t3all
+        if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-10 or \
+           np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-10 or \
+           np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-10:
+            print "STOP: Different results"
+            print "Bob's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r).squeeze())
+            print "Nic's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r2).squeeze())
+            print "Skl's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r3).squeeze())
+            raise ValueError("Different results")
+  
+        # debias: zero out very small entries
+        for nn in idx_r:
+          if abs(x_r[nn]) < 1e-10:
+            x_r[nn] = 0
+  
+        # exact recovery condition using support
+        #if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)):
+        #  successCounter += 1
+        # exact recovery condition using error in solution
+        error = x - x_r
+        """ the following is the exact recovery condition in: A. Maleki 
+              and D. L. Donoho, "Optimally tuned iterative reconstruction 
+              algorithms for compressed sensing," IEEE J. Selected Topics 
+              in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """
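+        # in other words, a trial counts as a success when the relative
+        # squared error ||x - x_r||_2^2 / ||x||_2^2 is below 1e-4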
+        if np.vdot(error,error) < np.vdot(x,x)*1e-4:
+          successCounter += 1
+        trial -= 1
+  
+      success[rhoCounter,deltaCounter] = successCounter
+      if successCounter == 0:
+        break
+  
+      rhoCounter += 1
+      #np.savetxt('test.txt',success,fmt='#2.1d',delimiter=',')
+    deltaCounter += 1
+
+if __name__ == '__main__':
+  print ('Running problem suite')
+  ambientDimension = 400
+  numGradations = 30
+  numTrials = 1
+  
+  #import cProfile
+  #cProfile.run('runProblemSuite(ambientDimension,numGradations,numTrials)','profres')  
+  runProblemSuite(ambientDimension,numGradations,numTrials) 
+  print "Done"
+  
+  #import pstats
+  #p = pstats.Stats('D:\Nic\Dev2\profres')
+  #p.sort_stats('cumulative').print_stats(10)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/BP/cgsolve.m	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,76 @@
+% cgsolve.m
+%
+% Solve a symmetric positive definite system Ax = b via conjugate gradients.
+%
+% Usage: [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+%
+% A - Either an NxN matrix, or a function handle.
+%
+% b - N vector
+%
+% tol - Desired precision.  Algorithm terminates when 
+%    norm(Ax-b)/norm(b) < tol .
+%
+% maxiter - Maximum number of iterations.
+%
+% verbose - If 0, do not print out progress messages.
+%    If an integer greater than 0, print out progress every 'verbose' iterations.
+%
+% Written by: Justin Romberg, Caltech
+% Email: jrom@acm.caltech.edu
+% Created: October 2005
+%
+
+function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+
+if (nargin < 5), verbose = 1; end
+
+implicit = isa(A,'function_handle');
+
+x = zeros(length(b),1);
+r = b;
+d = r;
+delta = r'*r;
+delta0 = b'*b;
+numiter = 0;
+bestx = x;
+bestres = sqrt(delta/delta0); 
+while ((numiter < maxiter) && (delta > tol^2*delta0))
+
+  % q = A*d
+  if (implicit), q = A(d);  else  q = A*d;  end
+ 
+  alpha = delta/(d'*q);
+  x = x + alpha*d;
+  
+  if (mod(numiter+1,50) == 0)
+    % r = b - Aux*x
+    if (implicit), r = b - A(x);  else  r = b - A*x;  end
+  else
+    r = r - alpha*q;
+  end
+  
+  deltaold = delta;
+  delta = r'*r;
+  beta = delta/deltaold;
+  d = r + beta*d;
+  numiter = numiter + 1;
+  if (sqrt(delta/delta0) < bestres)
+    bestx = x;
+    bestres = sqrt(delta/delta0);
+  end    
+  
+  if ((verbose) && (mod(numiter,verbose)==0))
+    disp(sprintf('cg: Iter = %d, Best residual = %8.3e, Current residual = %8.3e', ...
+      numiter, bestres, sqrt(delta/delta0)));
+  end
+  
+end
+
+if (verbose)
+  disp(sprintf('cg: Iterations = %d, best residual = %14.8e', numiter, bestres));
+end
+x = bestx;
+res = bestres;
+iter = numiter;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/BP/l1qc_logbarrier.m	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,121 @@
+% l1qc_logbarrier.m
+%
+% Solve quadratically constrained l1 minimization:
+% min ||x||_1   s.t.  ||Ax - b||_2 <= \epsilon
+%
+% Reformulate as the second-order cone program
+% min_{x,u}  sum(u)   s.t.    x - u <= 0,
+%                            -x - u <= 0,
+%      1/2(||Ax-b||^2 - \epsilon^2) <= 0
+% and use a log barrier algorithm.
+%
+% Usage:  xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
+%
+% x0 - Nx1 vector, initial point.
+%
+% A - Either a handle to a function that takes a N vector and returns a K 
+%     vector , or a KxN matrix.  If A is a function handle, the algorithm
+%     operates in "largescale" mode, solving the Newton systems via the
+%     Conjugate Gradients algorithm.
+%
+% At - Handle to a function that takes a K vector and returns an N vector.
+%      If A is a KxN matrix, At is ignored.
+%
+% b - Kx1 vector of observations.
+%
+% epsilon - scalar, constraint relaxation parameter
+%
+% lbtol - The log barrier algorithm terminates when the duality gap <= lbtol.
+%         Also, the number of log barrier iterations is completely
+%         determined by lbtol.
+%         Default = 1e-3.
+%
+% mu - Factor by which to increase the barrier constant at each iteration.
+%      Default = 10.
+%
+% cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
+%     Default = 1e-8.
+%
+% cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
+%     if A is a matrix.
+%     Default = 200.
+%
+% Written by: Justin Romberg, Caltech
+% Email: jrom@acm.caltech.edu
+% Created: October 2005
+%
+
+function xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)  
+
+largescale = isa(A,'function_handle');
+
+if (nargin < 6), lbtol = 1e-3; end
+if (nargin < 7), mu = 10; end
+if (nargin < 8), cgtol = 1e-8; end
+if (nargin < 9), cgmaxiter = 200; end
+
+newtontol = lbtol;
+newtonmaxiter = 50;
+
+N = length(x0);
+
+% starting point --- make sure that it is feasible
+if (largescale)
+  if (norm(A(x0)-b) > epsilon)
+    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
+    AAt = @(z) A(At(z));
+    [w, cgres] = cgsolve(AAt, b, cgtol, cgmaxiter, 0);
+    if (cgres > 1/2)
+      disp('A*At is ill-conditioned: cannot find starting point');
+      xp = x0;
+      return;
+    end
+    x0 = At(w);
+  end
+else
+  if (norm(A*x0-b) > epsilon)
+    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
+    opts.POSDEF = true; opts.SYM = true;
+    [w, hcond] = linsolve(A*A', b, opts);
+    if (hcond < 1e-14)
+      disp('A*At is ill-conditioned: cannot find starting point');
+      xp = x0;
+      return;
+    end
+    x0 = A'*w;
+  end  
+end
+x = x0;
+u = (0.95)*abs(x0) + (0.10)*max(abs(x0));
+
+disp(sprintf('Original l1 norm = %.3f, original functional = %.3f', sum(abs(x0)), sum(u)));
+
+% choose initial value of tau so that the duality gap after the first
+% step will be about the original norm
+tau = max((2*N+1)/sum(abs(x0)), 1);
+                                                                                                                          
+lbiter = ceil((log(2*N+1)-log(lbtol)-log(tau))/log(mu));
+disp(sprintf('Number of log barrier iterations = %d\n', lbiter));
+
+totaliter = 0;
+
+% Added by Nic
+if lbiter == 0
+    xp = zeros(size(x0));
+end
+
+for ii = 1:lbiter
+
+  [xp, up, ntiter] = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter);
+  totaliter = totaliter + ntiter;
+  
+  disp(sprintf('\nLog barrier iter = %d, l1 = %.3f, functional = %8.3f, tau = %8.3e, total newton iter = %d\n', ...
+    ii, sum(abs(xp)), sum(up), tau, totaliter));
+  
+  x = xp;
+  u = up;
+ 
+  tau = mu*tau;
+  
+end
+                   
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/BP/l1qc_newton.m	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,150 @@
+% l1qc_newton.m
+%
+% Newton algorithm for log-barrier subproblems for l1 minimization
+% with quadratic constraints.
+%
+% Usage: 
+% [xp,up,niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, 
+%                             newtontol, newtonmaxiter, cgtol, cgmaxiter)
+%
+% x0,u0 - starting points
+%
+% A - Either a handle to a function that takes a N vector and returns a K 
+%     vector , or a KxN matrix.  If A is a function handle, the algorithm
+%     operates in "largescale" mode, solving the Newton systems via the
+%     Conjugate Gradients algorithm.
+%
+% At - Handle to a function that takes a K vector and returns an N vector.
+%      If A is a KxN matrix, At is ignored.
+%
+% b - Kx1 vector of observations.
+%
+% epsilon - scalar, constraint relaxation parameter
+%
+% tau - Log barrier parameter.
+%
+% newtontol - Terminate when the Newton decrement is <= newtontol.
+%         Default = 1e-3.
+%
+% newtonmaxiter - Maximum number of iterations.
+%         Default = 50.
+%
+% cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
+%     Default = 1e-8.
+%
+% cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
+%     if A is a matrix.
+%     Default = 200.
+%
+% Written by: Justin Romberg, Caltech
+% Email: jrom@acm.caltech.edu
+% Created: October 2005
+%
+
+
+function [xp, up, niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter) 
+
+% check if the matrix A is implicit or explicit
+largescale = isa(A,'function_handle');
+
+% line search parameters
+alpha = 0.01;
+beta = 0.5;  
+
+if (~largescale), AtA = A'*A; end
+
+% initial point
+x = x0;
+u = u0;
+if (largescale), r = A(x) - b; else  r = A*x - b; end
+fu1 = x - u;
+fu2 = -x - u;
+fe = 1/2*(r'*r - epsilon^2);
+f = sum(u) - (1/tau)*(sum(log(-fu1)) + sum(log(-fu2)) + log(-fe));
+
+niter = 0;
+done = 0;
+while (~done)
+  
+  if (largescale), atr = At(r); else  atr = A'*r; end
+  
+  ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
+  ntgu = -tau - 1./fu1 - 1./fu2;
+  gradf = -(1/tau)*[ntgz; ntgu];
+  
+  sig11 = 1./fu1.^2 + 1./fu2.^2;
+  sig12 = -1./fu1.^2 + 1./fu2.^2;
+  sigx = sig11 - sig12.^2./sig11;
+    
+  w1p = ntgz - sig12./sig11.*ntgu;
+  if (largescale)
+    h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
+    [dx, cgres, cgiter] = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0);
+    if (cgres > 1/2)
+      disp('Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+      xp = x;  up = u;
+      return
+    end
+    Adx = A(dx);
+  else
+    H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
+    opts.POSDEF = true; opts.SYM = true;
+    [dx,hcond] = linsolve(H11p, w1p, opts);
+    if (hcond < 1e-14)
+      disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+      xp = x;  up = u;
+      return
+    end
+    Adx = A*dx;
+  end
+  du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
+ 
+  % minimum step size that stays in the interior
+  ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
+  aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
+  smax = min(1,min([...
+    -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
+    (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
+    ]));
+  s = (0.99)*smax;
+  
+  % backtracking line search
+  suffdec = 0;
+  backiter = 0;
+  while (~suffdec)
+    xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
+    fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
+    fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
+    flin = f + alpha*s*(gradf'*[dx; du]);
+    suffdec = (fp <= flin);
+    s = beta*s;
+    backiter = backiter + 1;
+    if (backiter > 32)
+      disp('Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)');
+      xp = x;  up = u;
+      return
+    end
+  end
+  
+  % set up for next iteration
+  x = xp; u = up;  r = rp;
+  fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
+  
+  lambda2 = -(gradf'*[dx; du]);
+  stepsize = s*norm([dx; du]);
+  niter = niter + 1;
+  done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
+  
+  disp(sprintf('Newton iter = %d, Functional = %8.3f, Newton decrement = %8.3f, Stepsize = %8.3e', ...
+    niter, f, lambda2/2, stepsize));
+  if (largescale)
+    disp(sprintf('                CG Res = %8.3e, CG Iter = %d', cgres, cgiter));
+  else
+    disp(sprintf('                  H11p condition number = %8.3e', hcond));
+  end
+      
+end
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/OMP/greed_omp_qr.m	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,472 @@
+function [s, err_mse, iter_time]=greed_omp_qr(x,A,m,varargin)
+% greed_omp_qr: Orthogonal Matching Pursuit algorithm based on QR
+% factorisation
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Usage
+% [s, err_mse, iter_time]=greed_omp_qr(x,P,m,'option_name','option_value')
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Input
+%   Mandatory:
+%               x   Observation vector to be decomposed
+%               P   Either:
+%                       1) An nxm matrix (n must be dimension of x)
+%                       2) A function handle (type "help function_format" 
+%                          for more information)
+%                          Also requires specification of P_trans option.
+%                       3) An object handle (type "help object_format" for 
+%                          more information)
+%               m   length of s 
+%
+%   Possible additional options:
+%   (specify as many as you want using 'option_name','option_value' pairs)
+%   See below for explanation of options:
+%__________________________________________________________________________
+%   option_name    |     available option_values                | default
+%--------------------------------------------------------------------------
+%   stopCrit       | M, corr, mse, mse_change                   | M
+%   stopTol        | number (see below)                         | n/4
+%   P_trans        | function_handle (see below)                | 
+%   maxIter        | positive integer (see below)               | n
+%   verbose        | true, false                                | false
+%   start_val      | vector of length m                         | zeros
+%
+%   Available stopping criteria :
+%               M           -   Extracts exactly M = stopTol elements.
+%               corr        -   Stops when maximum correlation between
+%                               residual and atoms is below stopTol value.
+%               mse         -   Stops when mean squared error of residual 
+%                               is below stopTol value.
+%               mse_change  -   Stops when the change in the mean squared 
+%                               error falls below stopTol value.
+%
+%   stopTol: Value for stopping criterion.
+%
+%   P_trans: If P is a function handle, then P_trans has to be specified and 
+%            must be a function handle. 
+%
+%   maxIter: Maximum number of allowed iterations.
+%
+%   verbose: Logical value to allow algorithm progress to be displayed.
+%
+%   start_val: Allows algorithms to start from partial solution.
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Outputs
+%    s              Solution vector 
+%    err_mse        Vector containing mse of approximation error for each 
+%                   iteration
+%    iter_time      Vector containing computation times for each iteration
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Description
+%   greed_omp_qr performs a greedy signal decomposition. 
+%   In each iteration a new element is selected depending on the inner
+%   product between the current residual and columns in P.
+%   The non-zero elements of s are approximated by orthogonally projecting 
+%   x onto the selected elements in each iteration.
+%   The algorithm uses QR decomposition.
+%
+% See Also
+%   greed_omp_chol, greed_omp_cg, greed_omp_cgp, greed_omp_pinv, 
+%   greed_omp_linsolve, greed_gp, greed_nomp
+%
+% Copyright (c) 2007 Thomas Blumensath
+%
+% The University of Edinburgh
+% Email: thomas.blumensath@ed.ac.uk
+% Comments and bug reports welcome
+%
+% This file is part of sparsity Version 0.1
+% Created: April 2007
+%
+% Part of this toolbox was developed with the support of EPSRC Grant
+% D000246/1
+%
+% Please read COPYRIGHT.m for terms and conditions.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                    Default values and initialisation
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+[n1 n2]=size(x);
+if n2 == 1
+    n=n1;
+elseif n1 == 1
+    x=x';
+    n=n2;
+else
+   display('x must be a vector.');
+   return
+end
+    
+sigsize     = x'*x/n;
+initial_given=0;
+err_mse     = [];
+iter_time   = [];
+STOPCRIT    = 'M';
+STOPTOL     = ceil(n/4);
+MAXITER     = n;
+verbose     = false;
+s_initial   = zeros(m,1);
+
+
+if verbose
+   display('Initialising...') 
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                           Output variables
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+switch nargout 
+    case 3
+        comp_err=true;
+        comp_time=true;
+    case 2 
+        comp_err=true;
+        comp_time=false;
+    case 1
+        comp_err=false;
+        comp_time=false;
+    case 0
+        error('Please assign output variable.')
+    otherwise
+        error('Too many output arguments specified')
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                       Look through options
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Put option into nice format
+Options={};
+OS=nargin-3;
+c=1;
+for i=1:OS
+    if isa(varargin{i},'cell')
+        CellSize=length(varargin{i});
+        ThisCell=varargin{i};
+        for j=1:CellSize
+            Options{c}=ThisCell{j};
+            c=c+1;
+        end
+    else
+        Options{c}=varargin{i};
+        c=c+1;
+    end
+end
+OS=length(Options);
+if rem(OS,2)
+   error('Something is wrong with argument name and argument value pairs.') 
+end
+
+for i=1:2:OS
+   switch Options{i}
+        case {'stopCrit'}
+            if (strmatch(Options{i+1},{'M'; 'corr'; 'mse'; 'mse_change'},'exact'));
+                STOPCRIT    = Options{i+1};  
+            else error('stopCrit must be char string [M, corr, mse, mse_change]. Exiting.'); end 
+        case {'stopTol'}
+            if isa(Options{i+1},'numeric') ; STOPTOL     = Options{i+1};   
+            else error('stopTol must be number. Exiting.'); end
+        case {'P_trans'} 
+            if isa(Options{i+1},'function_handle'); Pt = Options{i+1};   
+            else error('P_trans must be a function_handle. Exiting.'); end
+        case {'maxIter'}
+            if isa(Options{i+1},'numeric'); MAXITER     = Options{i+1};             
+            else error('maxIter must be a number. Exiting.'); end
+        case {'verbose'}
+            if isa(Options{i+1},'logical'); verbose     = Options{i+1};   
+            else error('verbose must be a logical. Exiting.'); end 
+        case {'start_val'}
+            if isa(Options{i+1},'numeric') & length(Options{i+1}) == m ;
+                s_initial     = Options{i+1};   
+                initial_given=1;
+            else error('start_val must be a vector of length m. Exiting.'); end
+        otherwise
+            error('Unrecognised option. Exiting.') 
+   end
+end
+
+
+
+if strcmp(STOPCRIT,'M') 
+    maxM=STOPTOL;
+else
+    maxM=MAXITER;
+end
+
+if nargout >=2
+    err_mse = zeros(maxM,1);
+end
+if nargout ==3
+    iter_time = zeros(maxM,1);
+end
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                        Make P and Pt functions
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+if          isa(A,'float')      P =@(z) A*z;  Pt =@(z) A'*z;
+elseif      isobject(A)         P =@(z) A*z;  Pt =@(z) A'*z;
+elseif      isa(A,'function_handle') 
+    try
+        if          isa(Pt,'function_handle'); P=A;
+        else        error('If P is a function handle, Pt also needs to be a function handle. Exiting.'); end
+    catch error('If P is a function handle, Pt needs to be specified. Exiting.'); end
+else        error('P is of unsupported type. Use matrix, function_handle or object. Exiting.'); end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                 Random Check to see if dictionary is normalised 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+        % Commented by Nic, 13 Sept 2011
+        % Don't want any slow text output
+%         mask=zeros(m,1);
+%         mask(ceil(rand*m))=1;
+%         nP=norm(P(mask));
+%         if abs(1-nP)>1e-3;
+%             display('Dictionary appears not to have unit norm columns.')
+%         end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%              Check if we have enough memory and initialise 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        
+
+        try Q=zeros(n,maxM);
+        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
+        end 
+        try R=zeros(maxM);
+        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
+        end
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                        Do we start from zero or not?
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+if initial_given ==1;
+    IN          = find(s_initial);
+    if ~isempty(IN)
+        Residual    = x-P(s_initial);
+        lengthIN=length(IN);
+        z=[];
+        for k=1:length(IN)
+            % Extract new element
+             mask=zeros(m,1);
+             mask(IN(k))=1;
+             new_element=P(mask);
+
+            % Orthogonalise new element 
+             qP=Q(:,1:k-1)'*new_element;
+             q=new_element-Q(:,1:k-1)*(qP);
+
+             nq=norm(q);
+             q=q/nq;
+            % Update QR factorisation 
+             R(1:k-1,k)=qP;
+             R(k,k)=nq;
+             Q(:,k)=q;
+
+             z(k)=q'*x;
+        end
+        s           = s_initial;
+        Residual=x-Q(:,k)*z;
+        oldERR      = Residual'*Residual/n;
+    else
+    	IN          = [];
+        Residual    = x;
+        s           = s_initial;
+        sigsize     = x'*x/n;
+        oldERR      = sigsize;
+        k=0;
+        z=[];
+    end
+    
+else
+    IN          = [];
+    Residual    = x;
+    s           = s_initial;
+    sigsize     = x'*x/n;
+    oldERR      = sigsize;
+    k=0;
+    z=[];
+end
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                        Main algorithm
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+if verbose
+   display('Main iterations...') 
+end
+tic
+t=0;
+DR=Pt(Residual);
+done = 0;
+iter=1;
+
+%Nic: find norms of dictionary atoms, for RandOMP
+% for i = 1:m
+%     norms(i) = norm(P([zeros(i-1,1) ; 1 ; zeros(m-i,1)]));
+% end
+
+while ~done
+    
+     % Select new element
+     DR(IN)=0;
+     % Nic: replace selection with random variable
+     % i.e. Randomized OMP!!
+     % DON'T FORGET ABOUT THIS!!
+     [v I]=max(abs(DR));
+     %I = randp(exp(abs(DR).^2 ./ (norms.^2)'), [1 1]);
+     IN=[IN I];
+
+    
+     k=k+1;
+     % Extract new element
+     mask=zeros(m,1);
+     mask(IN(k))=1;
+     new_element=P(mask);
+
+    % Orthogonalise new element 
+     qP=Q(:,1:k-1)'*new_element;
+     q=new_element-Q(:,1:k-1)*(qP);
+
+     nq=norm(q);
+     q=q/nq;
+    % Update QR factorisation 
+     R(1:k-1,k)=qP;
+     R(k,k)=nq;
+     Q(:,k)=q;
+
+     z(k)=q'*x;
+   
+    % New residual 
+     Residual=Residual-q*(z(k));
+     DR=Pt(Residual);
+     
+     ERR=Residual'*Residual/n;
+     if comp_err
+         err_mse(iter)=ERR;
+     end
+     
+     if comp_time
+         iter_time(iter)=toc;
+     end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                        Are we done yet?
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+     
+     if strcmp(STOPCRIT,'M')
+         if iter >= STOPTOL
+             done =1;
+         elseif verbose && toc-t>10
+            display(sprintf('Iteration %i. --- %i iterations to go',iter ,STOPTOL-iter)) 
+            t=toc;
+         end
+    elseif strcmp(STOPCRIT,'mse')
+         if comp_err
+            if err_mse(iter)<STOPTOL;
+                done = 1; 
+            elseif verbose && toc-t>10
+                display(sprintf('Iteration %i. --- %i mse',iter ,err_mse(iter))) 
+                t=toc;
+            end
+         else
+             if ERR<STOPTOL;
+                done = 1; 
+             elseif verbose && toc-t>10
+                display(sprintf('Iteration %i. --- %i mse',iter ,ERR)) 
+                t=toc;
+             end
+         end
+     elseif strcmp(STOPCRIT,'mse_change') && iter >=2
+         if comp_err && iter >=2
+              if ((err_mse(iter-1)-err_mse(iter))/sigsize <STOPTOL);
+                done = 1; 
+             elseif verbose && toc-t>10
+                display(sprintf('Iteration %i. --- %i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
+                t=toc;
+             end
+         else
+             if ((oldERR - ERR)/sigsize < STOPTOL);
+                done = 1; 
+             elseif verbose && toc-t>10
+                display(sprintf('Iteration %i. --- %i mse change',iter ,(oldERR - ERR)/sigsize)) 
+                t=toc;
+             end
+         end
+     elseif strcmp(STOPCRIT,'corr') 
+          if max(abs(DR)) < STOPTOL;
+             done = 1; 
+          elseif verbose && toc-t>10
+                display(sprintf('Iteration %i. --- %i corr',iter ,max(abs(DR)))) 
+                t=toc;
+          end
+     end
+     
+    % Also stop if residual gets too small or maxIter reached
+     if comp_err
+         if err_mse(iter)<1e-16
+             display('Stopping. Exact signal representation found!')
+             done=1;
+         end
+     else
+
+
+         if iter>1
+             if ERR<1e-16
+                 display('Stopping. Exact signal representation found!')
+                 done=1;
+             end
+         end
+     end
+
+     if iter >= MAXITER
+         display('Stopping. Maximum number of iterations reached!')
+         done = 1; 
+     end
+     
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                    If not done, take another round
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   
+     if ~done
+        iter=iter+1;
+        oldERR=ERR;
+     end
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%            Now we can solve for s by back-substitution
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ s(IN)=R(1:k,1:k)\z(1:k)';
+ 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                  Only return as many elements as iterations
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+if nargout >=2
+    err_mse = err_mse(1:iter);
+end
+if nargout ==3
+    iter_time = iter_time(1:iter);
+end
+if verbose
+   display('Done') 
+end
+
+% Change history
+%
+% 8 of February: The algorithm no longer stops if the dictionary is not normalised.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyCSalgos/BP/l1qc.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,705 @@
+
+import numpy as np
+import scipy.linalg
+import math
+
+
+#function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+def cgsolve(A, b, tol, maxiter, verbose=1):
+    # Solve a symmetric positive definite system Ax = b via conjugate gradients.
+    #
+    # Usage: [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+    #
+    # A - Either an NxN matrix, or a function handle.
+    #
+    # b - N vector
+    #
+    # tol - Desired precision.  Algorithm terminates when 
+    #    norm(Ax-b)/norm(b) < tol .
+    #
+    # maxiter - Maximum number of iterations.
+    #
+    # verbose - If 0, do not print out progress messages.
+    #    If an integer greater than 0, print out progress every 'verbose' iterations.
+    #
+    # Written by: Justin Romberg, Caltech
+    # Email: jrom@acm.caltech.edu
+    # Created: October 2005
+    #
+
+    #---------------------
+    # Original Matlab code:
+    #        
+    #if (nargin < 5), verbose = 1; end
+    #
+    #implicit = isa(A,'function_handle');
+    #
+    #x = zeros(length(b),1);
+    #r = b;
+    #d = r;
+    #delta = r'*r;
+    #delta0 = b'*b;
+    #numiter = 0;
+    #bestx = x;
+    #bestres = sqrt(delta/delta0); 
+    #while ((numiter < maxiter) && (delta > tol^2*delta0))
+    #
+    #  # q = A*d
+    #  if (implicit), q = A(d);  else  q = A*d;  end
+    # 
+    #  alpha = delta/(d'*q);
+    #  x = x + alpha*d;
+    #  
+    #  if (mod(numiter+1,50) == 0)
+    #    # r = b - Aux*x
+    #    if (implicit), r = b - A(x);  else  r = b - A*x;  end
+    #  else
+    #    r = r - alpha*q;
+    #  end
+    #  
+    #  deltaold = delta;
+    #  delta = r'*r;
+    #  beta = delta/deltaold;
+    #  d = r + beta*d;
+    #  numiter = numiter + 1;
+    #  if (sqrt(delta/delta0) < bestres)
+    #    bestx = x;
+    #    bestres = sqrt(delta/delta0);
+    #  end    
+    #  
+    #  if ((verbose) && (mod(numiter,verbose)==0))
+    #    disp(sprintf('cg: Iter = #d, Best residual = #8.3e, Current residual = #8.3e', ...
+    #      numiter, bestres, sqrt(delta/delta0)));
+    #  end
+    #  
+    #end
+    #
+    #if (verbose)
+    #  disp(sprintf('cg: Iterations = #d, best residual = #14.8e', numiter, bestres));
+    #end
+    #x = bestx;
+    #res = bestres;
+    #iter = numiter;
+
+    # End of original Matlab code
+    #----------------------------
+    
+    #if (nargin < 5), verbose = 1; end
+    # Optional argument
+    
+    #implicit = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        implicit = True
+    else:
+        implicit = False
+    
+    x = np.zeros(b.size)
+    r = b.copy()
+    d = r.copy()
+    delta = np.vdot(r,r)
+    delta0 = np.vdot(b,b)
+    numiter = 0
+    bestx = x.copy()
+    bestres = math.sqrt(delta/delta0)
+    while (numiter < maxiter) and (delta > tol**2*delta0):
+    
+      # q = A*d
+      #if (implicit), q = A(d);  else  q = A*d;  end
+      if implicit:
+          q = A(d)
+      else:
+          q = np.dot(A,d)
+     
+      alpha = delta/np.vdot(d,q)
+      x = x + alpha*d
+      
+      if divmod(numiter+1,50)[1] == 0:
+        # r = b - Aux*x
+        #if (implicit), r = b - A(x);  else  r = b - A*x;  end
+        if implicit:
+            r = b - A(x)
+        else:
+            r = b - np.dot(A,x)
+      else:
+        r = r - alpha*q
+      #end
+      
+      deltaold = delta;
+      delta = np.vdot(r,r)
+      beta = delta/deltaold;
+      d = r + beta*d
+      numiter = numiter + 1
+      if (math.sqrt(delta/delta0) < bestres):
+        bestx = x.copy()
+        bestres = math.sqrt(delta/delta0)
+      #end    
+      
+      if ((verbose) and (divmod(numiter,verbose)[1]==0)):
+        #disp(sprintf('cg: Iter = #d, Best residual = #8.3e, Current residual = #8.3e', ...
+        #  numiter, bestres, sqrt(delta/delta0)));
+        print 'cg: Iter = ',numiter,', Best residual = ',bestres,', Current residual = ',math.sqrt(delta/delta0)
+      #end
+      
+    #end
+    
+    if (verbose):
+      #disp(sprintf('cg: Iterations = #d, best residual = #14.8e', numiter, bestres));
+      print 'cg: Iterations = ',numiter,', best residual = ',bestres
+    #end
+    x = bestx.copy()
+    res = bestres
+    iter = numiter
+    
+    return x,res,iter
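+
+# Illustrative (assumed) usage of cgsolve -- not part of the original module;
+# A may be an explicit symmetric positive definite matrix or a callable:
+#
+#   M = np.random.randn(50, 20)
+#   G = np.dot(M.T, M) + np.eye(20)                  # SPD matrix
+#   b = np.random.randn(20)
+#   x, res, niter = cgsolve(G, b, 1e-8, 200, 0)
+#   x2, res2, niter2 = cgsolve(lambda z: np.dot(G, z), b, 1e-8, 200, 0)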
+
+
+
+#function [xp, up, niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter) 
+def l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter):
+    # Newton algorithm for log-barrier subproblems for l1 minimization
+    # with quadratic constraints.
+    #
+    # Usage: 
+    # [xp,up,niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, 
+    #                             newtontol, newtonmaxiter, cgtol, cgmaxiter)
+    #
+    # x0,u0 - starting points
+    #
+    # A - Either a handle to a function that takes a N vector and returns a K 
+    #     vector , or a KxN matrix.  If A is a function handle, the algorithm
+    #     operates in "largescale" mode, solving the Newton systems via the
+    #     Conjugate Gradients algorithm.
+    #
+    # At - Handle to a function that takes a K vector and returns an N vector.
+    #      If A is a KxN matrix, At is ignored.
+    #
+    # b - Kx1 vector of observations.
+    #
+    # epsilon - scalar, constraint relaxation parameter
+    #
+    # tau - Log barrier parameter.
+    #
+    # newtontol - Terminate when the Newton decrement is <= newtontol.
+    #         Default = 1e-3.
+    #
+    # newtonmaxiter - Maximum number of iterations.
+    #         Default = 50.
+    #
+    # cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
+    #     Default = 1e-8.
+    #
+    # cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
+    #     if A is a matrix.
+    #     Default = 200.
+    #
+    # Written by: Justin Romberg, Caltech
+    # Email: jrom@acm.caltech.edu
+    # Created: October 2005
+    #
+    
+    #---------------------
+    # Original Matlab code:
+    
+    ## check if the matrix A is implicit or explicit
+    #largescale = isa(A,'function_handle');
+    #
+    ## line search parameters
+    #alpha = 0.01;
+    #beta = 0.5;  
+    #
+    #if (~largescale), AtA = A'*A; end
+    #
+    ## initial point
+    #x = x0;
+    #u = u0;
+    #if (largescale), r = A(x) - b; else  r = A*x - b; end
+    #fu1 = x - u;
+    #fu2 = -x - u;
+    #fe = 1/2*(r'*r - epsilon^2);
+    #f = sum(u) - (1/tau)*(sum(log(-fu1)) + sum(log(-fu2)) + log(-fe));
+    #
+    #niter = 0;
+    #done = 0;
+    #while (~done)
+    #  
+    #  if (largescale), atr = At(r); else  atr = A'*r; end
+    #  
+    #  ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
+    #  ntgu = -tau - 1./fu1 - 1./fu2;
+    #  gradf = -(1/tau)*[ntgz; ntgu];
+    #  
+    #  sig11 = 1./fu1.^2 + 1./fu2.^2;
+    #  sig12 = -1./fu1.^2 + 1./fu2.^2;
+    #  sigx = sig11 - sig12.^2./sig11;
+    #    
+    #  w1p = ntgz - sig12./sig11.*ntgu;
+    #  if (largescale)
+    #    h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
+    #    [dx, cgres, cgiter] = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0);
+    #    if (cgres > 1/2)
+    #      disp('Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+    #      xp = x;  up = u;
+    #      return
+    #    end
+    #    Adx = A(dx);
+    #  else
+    #    H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
+    #    opts.POSDEF = true; opts.SYM = true;
+    #    [dx,hcond] = linsolve(H11p, w1p, opts);
+    #    if (hcond < 1e-14)
+    #      disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+    #      xp = x;  up = u;
+    #      return
+    #    end
+    #    Adx = A*dx;
+    #  end
+    #  du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
+    # 
+    #  # minimum step size that stays in the interior
+    #  ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
+    #  aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
+    #  smax = min(1,min([...
+    #    -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
+    #    (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
+    #    ]));
+    #  s = (0.99)*smax;
+    #  
+    #  # backtracking line search
+    #  suffdec = 0;
+    #  backiter = 0;
+    #  while (~suffdec)
+    #    xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
+    #    fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
+    #    fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
+    #    flin = f + alpha*s*(gradf'*[dx; du]);
+    #    suffdec = (fp <= flin);
+    #    s = beta*s;
+    #    backiter = backiter + 1;
+    #    if (backiter > 32)
+    #      disp('Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)');
+    #      xp = x;  up = u;
+    #      return
+    #    end
+    #  end
+    #  
+    #  # set up for next iteration
+    #  x = xp; u = up;  r = rp;
+    #  fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
+    #  
+    #  lambda2 = -(gradf'*[dx; du]);
+    #  stepsize = s*norm([dx; du]);
+    #  niter = niter + 1;
+    #  done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
+    #  
+    #  disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
+    #    niter, f, lambda2/2, stepsize));
+    #  if (largescale)
+    #    disp(sprintf('                CG Res = #8.3e, CG Iter = #d', cgres, cgiter));
+    #  else
+    #    disp(sprintf('                  H11p condition number = #8.3e', hcond));
+    #  end
+    #      
+    #end
+    
+    # End of original Matlab code
+    #----------------------------
+    
+    # check if the matrix A is implicit or explicit
+    #largescale = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        largescale = True
+    else:
+        largescale = False    
+    
+    # line search parameters
+    alpha = 0.01
+    beta = 0.5
+    
+    #if (~largescale), AtA = A'*A; end
+    if not largescale:
+        AtA = np.dot(A.T,A)
+    
+    # initial point
+    x = x0.copy()
+    u = u0.copy()
+    #if (largescale), r = A(x) - b; else  r = A*x - b; end
+    if largescale:
+        r = A(x) - b
+    else:
+        r = np.dot(A,x) - b
+        
+    fu1 = x - u
+    fu2 = -x - u
+    fe = 0.5*(np.vdot(r,r) - epsilon**2)   # 0.5, not 1/2 (integer division in Python 2)
+    f = u.sum() - (1/tau)*(np.log(-fu1).sum() + np.log(-fu2).sum() + math.log(-fe))
+    
+    niter = 0
+    done = 0
+    while not done:
+      
+      #if (largescale), atr = At(r); else  atr = A'*r; end
+      if largescale:
+          atr = At(r)
+      else:
+          atr = np.dot(A.T,r)
+      
+      #ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
+      ntgz = 1.0/fu1 - 1.0/fu2 + 1.0/fe*atr
+      #ntgu = -tau - 1./fu1 - 1./fu2;
+      ntgu = -tau - 1.0/fu1 - 1.0/fu2
+      #gradf = -(1/tau)*[ntgz; ntgu];
+      gradf = -(1.0/tau)*np.concatenate((ntgz, ntgu),0)
+      
+      #sig11 = 1./fu1.^2 + 1./fu2.^2;
+      sig11 = 1.0/(fu1**2) + 1.0/(fu2**2)
+      #sig12 = -1./fu1.^2 + 1./fu2.^2;
+      sig12 = -1.0/(fu1**2) + 1.0/(fu2**2)
+      #sigx = sig11 - sig12.^2./sig11;
+      sigx = sig11 - (sig12**2)/sig11
+        
+      #w1p = ntgz - sig12./sig11.*ntgu;
+      w1p = ntgz - sig12/sig11*ntgu
+      if largescale:
+        #h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
+        h11pfun = lambda z: sigx*z - (1.0/fe)*At(A(z)) + 1.0/(fe**2)*np.dot(np.dot(atr.T,z),atr)
+        dx,cgres,cgiter = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0)
+        if (cgres > 0.5):   # 1/2 would evaluate to 0 under Python 2 integer division
+          print 'Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+          xp = x.copy()
+          up = u.copy()
+          return xp,up,niter
+        #end
+        Adx = A(dx)
+      else:
+        #H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
+        H11p = np.diag(sigx) - (1/fe)*AtA + (1/fe)**2*np.outer(atr,atr)   # outer product, as in the Matlab atr*atr'
+        #opts.POSDEF = true; opts.SYM = true;
+        #[dx,hcond] = linsolve(H11p, w1p, opts);
+        #if (hcond < 1e-14)
+        #  disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+        #  xp = x;  up = u;
+        #  return
+        #end
+        try:
+            dx = scipy.linalg.solve(H11p, w1p, sym_pos=True)
+            hcond = 1.0/scipy.linalg.cond(H11p)
+        except scipy.linalg.LinAlgError:
+            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            xp = x.copy()
+            up = u.copy()
+            return xp,up,niter
+        if hcond < 1e-14:
+            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            xp = x.copy()
+            up = u.copy()
+            return xp,up,niter
+        
+        #Adx = A*dx;
+        Adx = np.dot(A,dx)
+      #end
+      #du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
+      du = (1.0/sig11)*ntgu - (sig12/sig11)*dx;
+     
+      # minimum step size that stays in the interior
+      #ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
+      ifu1 = np.nonzero((dx-du)>0)
+      ifu2 = np.nonzero((-dx-du)>0)
+      #aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
+      aqe = np.dot(Adx.T,Adx)
+      bqe = 2*np.dot(r.T,Adx)
+      cqe = np.vdot(r,r) - epsilon**2
+      #smax = min(1,min([...
+      #  -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
+      #  (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
+      #  ]));
+      smax = min(1,np.concatenate((-fu1[ifu1]/(dx[ifu1]-du[ifu1]) , -fu2[ifu2]/(-dx[ifu2]-du[ifu2]) , np.array([(-bqe + math.sqrt(bqe**2-4*aqe*cqe))/(2*aqe)])),0).min())
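+      # (the three bounds keep fu1 and fu2 negative along the step and keep
+      #  ||r + s*Adx||_2 <= epsilon; the quadratic root is the largest step
+      #  satisfying the norm constraint)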
+        
+      s = (0.99)*smax
+      
+      # backtracking line search
+      suffdec = 0
+      backiter = 0
+      while not suffdec:
+        #xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
+        xp = x + s*dx
+        up = u + s*du
+        rp = r + s*Adx
+        #fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
+        fu1p = xp - up
+        fu2p = -xp - up
+        fep = 0.5*(np.vdot(rp,rp) - epsilon**2)   # uses the trial residual rp
+        #fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
+        fp = up.sum() - (1.0/tau)*(np.log(-fu1p).sum() + np.log(-fu2p).sum() + math.log(-fep))
+        #flin = f + alpha*s*(gradf'*[dx; du]);
+        flin = f + alpha*s*np.dot(gradf.T , np.concatenate((dx,du),0))
+        #suffdec = (fp <= flin);
+        if fp <= flin:
+            suffdec = True
+        else:
+            suffdec = False
+        
+        s = beta*s
+        backiter = backiter + 1
+        if (backiter > 32):
+          print 'Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)'
+          xp = x.copy()
+          up = u.copy()
+          return xp,up,niter
+        #end
+      #end
+      
+      # set up for next iteration
+      #x = xp; u = up;  r = rp;
+      x = xp.copy()
+      u = up.copy()
+      r = rp.copy()
+      #fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
+      fu1 = fu1p.copy()
+      fu2 = fu2p.copy()
+      fe = fep
+      f = fp
+      
+      #lambda2 = -(gradf'*[dx; du]);
+      lambda2 = -np.dot(gradf , np.concatenate((dx,du),0))
+      #stepsize = s*norm([dx; du]);
+      stepsize = s * np.linalg.norm(np.concatenate((dx,du),0))
+      niter = niter + 1
+      #done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
+      if lambda2/2.0 < newtontol or niter >= newtonmaxiter:
+          done = 1
+      else:
+          done = 0
+      
+      #disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
+      print 'Newton iter = ',niter,', Functional = ',f,', Newton decrement = ',lambda2/2.0,', Stepsize = ',stepsize
+
+      if largescale:
+        print '                CG Res = ',cgres,', CG Iter = ',cgiter
+      else:
+        print '                  H11p condition number = ',hcond
+      #end
+          
+    #end
+    return xp,up,niter
+
+#function xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
+def l1qc_logbarrier(x0, A, At, b, epsilon, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200):
+    # Solve quadratically constrained l1 minimization:
+    # min ||x||_1   s.t.  ||Ax - b||_2 <= \epsilon
+    #
+    # Reformulate as the second-order cone program
+    # min_{x,u}  sum(u)   s.t.    x - u <= 0,
+    #                            -x - u <= 0,
+    #      1/2(||Ax-b||^2 - \epsilon^2) <= 0
+    # and use a log barrier algorithm.
+    #
+    # Usage:  xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
+    #
+    # x0 - Nx1 vector, initial point.
+    #
+    # A - Either a handle to a function that takes a N vector and returns a K 
+    #     vector , or a KxN matrix.  If A is a function handle, the algorithm
+    #     operates in "largescale" mode, solving the Newton systems via the
+    #     Conjugate Gradients algorithm.
+    #
+    # At - Handle to a function that takes a K vector and returns an N vector.
+    #      If A is a KxN matrix, At is ignored.
+    #
+    # b - Kx1 vector of observations.
+    #
+    # epsilon - scalar, constraint relaxation parameter
+    #
+    # lbtol - The log barrier algorithm terminates when the duality gap <= lbtol.
+    #         Also, the number of log barrier iterations is completely
+    #         determined by lbtol.
+    #         Default = 1e-3.
+    #
+    # mu - Factor by which to increase the barrier constant at each iteration.
+    #      Default = 10.
+    #
+    # cgtol - Tolerance for Conjugate Gradients; ignored if A is a matrix.
+    #     Default = 1e-8.
+    #
+    # cgmaxiter - Maximum number of iterations for Conjugate Gradients; ignored
+    #     if A is a matrix.
+    #     Default = 200.
+    #
+    # Written by: Justin Romberg, Caltech
+    # Email: jrom@acm.caltech.edu
+    # Created: October 2005
+    #
+
+    #---------------------
+    # Original Matlab code:
+    
+    #largescale = isa(A,'function_handle');
+    #
+    #if (nargin < 6), lbtol = 1e-3; end
+    #if (nargin < 7), mu = 10; end
+    #if (nargin < 8), cgtol = 1e-8; end
+    #if (nargin < 9), cgmaxiter = 200; end
+    #
+    #newtontol = lbtol;
+    #newtonmaxiter = 50;
+    #
+    #N = length(x0);
+    #
+    ## starting point --- make sure that it is feasible
+    #if (largescale)
+    #  if (norm(A(x0)-b) > epsilon)
+    #    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
+    #    AAt = @(z) A(At(z));
+    #    w = cgsolve(AAt, b, cgtol, cgmaxiter, 0);
+    #    if (cgres > 1/2)
+    #      disp('A*At is ill-conditioned: cannot find starting point');
+    #      xp = x0;
+    #      return;
+    #    end
+    #    x0 = At(w);
+    #  end
+    #else
+    #  if (norm(A*x0-b) > epsilon)
+    #    disp('Starting point infeasible; using x0 = At*inv(AAt)*y.');
+    #    opts.POSDEF = true; opts.SYM = true;
+    #    [w, hcond] = linsolve(A*A', b, opts);
+    #    if (hcond < 1e-14)
+    #      disp('A*At is ill-conditioned: cannot find starting point');
+    #      xp = x0;
+    #      return;
+    #    end
+    #    x0 = A'*w;
+    #  end  
+    #end
+    #x = x0;
+    #u = (0.95)*abs(x0) + (0.10)*max(abs(x0));
+    #
+    #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
+    #
+    ## choose initial value of tau so that the duality gap after the first
+    ## step will be about the original norm
+    #tau = max((2*N+1)/sum(abs(x0)), 1);
+    #                                                                                                                          
+    #lbiter = ceil((log(2*N+1)-log(lbtol)-log(tau))/log(mu));
+    #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
+    #
+    #totaliter = 0;
+    #
+    ## Added by Nic
+    #if lbiter == 0
+    #    xp = zeros(size(x0));
+    #end
+    #
+    #for ii = 1:lbiter
+    #
+    #  [xp, up, ntiter] = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter);
+    #  totaliter = totaliter + ntiter;
+    #  
+    #  disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
+    #    ii, sum(abs(xp)), sum(up), tau, totaliter));
+    #  
+    #  x = xp;
+    #  u = up;
+    # 
+    #  tau = mu*tau;
+    #  
+    #end
+    #
+    # End of original Matlab code
+    #----------------------------
+    
+    #largescale = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        largescale = True
+    else:
+        largescale = False
+    
+    #    if (nargin < 6), lbtol = 1e-3; end
+    #    if (nargin < 7), mu = 10; end
+    #    if (nargin < 8), cgtol = 1e-8; end
+    #    if (nargin < 9), cgmaxiter = 200; end
+    # Nic: added them as optional parameters
+    
+    newtontol = lbtol
+    newtonmaxiter = 50
+    
+    #N = length(x0);
+    N = x0.size   # numpy size is an attribute, not a method
+    
+    # starting point --- make sure that it is feasible
+    if largescale:
+      if np.linalg.norm(A(x0) - b) > epsilon:
+        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        #AAt = @(z) A(At(z));
+        AAt = lambda z: A(At(z))
+        # cgsolve is implemented above in this module
+        w,cgres,cgiter = cgsolve(AAt, b, cgtol, cgmaxiter, 0)
+        if (cgres > 0.5):   # 1/2 would evaluate to 0 under Python 2 integer division
+          print 'A*At is ill-conditioned: cannot find starting point'
+          xp = x0.copy()
+          return xp
+        #end
+        x0 = At(w)
+      #end
+    else:
+      if np.linalg.norm( np.dot(A,x0) - b ) > epsilon:
+        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        #opts.POSDEF = true; opts.SYM = true;
+        #[w, hcond] = linsolve(A*A', b, opts);
+        #if (hcond < 1e-14)
+        #  disp('A*At is ill-conditioned: cannot find starting point');
+        #  xp = x0;
+        #  return;
+        #end
+        try:
+            w = scipy.linalg.solve(np.dot(A,A.T), b, sym_pos=True)
+            hcond = 1.0/scipy.linalg.cond(np.dot(A,A.T))
+        except scipy.linalg.LinAlgError:
+            print 'A*At is ill-conditioned: cannot find starting point'
+            xp = x0.copy()
+            return xp
+        if hcond < 1e-14:
+            print 'A*At is ill-conditioned: cannot find starting point'
+            xp = x0.copy()
+            return xp           
+        #x0 = A'*w;
+        x0 = np.dot(A.T, w)
+      #end  
+    #end
+    x = x0.copy()
+    u = (0.95)*np.abs(x0) + (0.10)*np.abs(x0).max()
+    
+    #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
+    print 'Original l1 norm = ',np.abs(x0).sum(),'original functional = ',u.sum()
+    
+    # choose initial value of tau so that the duality gap after the first
+    # step will be about the original norm
+    tau = max(((2*N+1)/np.abs(x0).sum()), 1)
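+    # (with 2*N+1 inequality constraints the duality gap at barrier parameter
+    #  tau is about (2*N+1)/tau, so tau = (2*N+1)/||x0||_1 gives an initial
+    #  gap of roughly ||x0||_1)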
+                                                                                                                              
+    lbiter = math.ceil((math.log(2*N+1)-math.log(lbtol)-math.log(tau))/math.log(mu))
+    #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
+    print 'Number of log barrier iterations = ',lbiter
+    
+    totaliter = 0
+    
+    # Added by Nic, to fix some crashing
+    if lbiter == 0:
+        xp = np.zeros(x0.size)
+    #end
+    
+    #for ii = 1:lbiter
+    for ii in np.arange(lbiter):
+    
+      xp,up,ntiter = l1qc_newton(x, u, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter)
+      totaliter = totaliter + ntiter
+      
+      #disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
+      #  ii, sum(abs(xp)), sum(up), tau, totaliter));
+      print 'Log barrier iter = ',ii,', l1 = ',np.abs(xp).sum(),', functional = ',up.sum(),', tau = ',tau,', total newton iter = ',totaliter
+      x = xp.copy()
+      u = up.copy()
+     
+      tau = mu*tau
+      
+    #end
+    return xp
+                   
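+
+# A hypothetical end-to-end sketch (names and sizes are made up; A is an
+# explicit KxN matrix, so the At argument is ignored per the docstring):
+#
+#   K, N = 60, 256
+#   A = np.random.randn(K, N) / math.sqrt(K)
+#   x_true = np.zeros(N); x_true[:8] = np.random.randn(8)
+#   b = np.dot(A, x_true)
+#   x0 = np.dot(A.T, np.linalg.solve(np.dot(A, A.T), b))   # least-norm start
+#   x_hat = l1qc_logbarrier(x0, A, None, b, 1e-3)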
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyCSalgos/OMP/omp_QR.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,842 @@
+import numpy as np
+import scipy.linalg
+import time
+import math
+
+
+#function [s, err_mse, iter_time]=greed_omp_qr(x,A,m,varargin)
+def greed_omp_qr(x,A,m,opts=[]):
+# greed_omp_qr: Orthogonal Matching Pursuit algorithm based on QR
+# factorisation
+# Nic: translated to Python on 19.10.2011. Original Matlab Code by Thomas Blumensath
+###########################################################################
+# Usage
+# [s, err_mse, iter_time]=greed_omp_qr(x,P,m,'option_name','option_value')
+###########################################################################
+###########################################################################
+# Input
+#   Mandatory:
+#               x   Observation vector to be decomposed
+#               P   Either:
+#                       1) An nxm matrix (n must be dimension of x)
+#                       2) A function handle (type "help function_format" 
+#                          for more information)
+#                          Also requires specification of P_trans option.
+#                       3) An object handle (type "help object_format" for 
+#                          more information)
+#               m   length of s 
+#
+#   Possible additional options:
+#   (specify as many as you want using 'option_name','option_value' pairs)
+#   See below for explanation of options:
+#__________________________________________________________________________
+#   option_name    |     available option_values                | default
+#--------------------------------------------------------------------------
+#   stopCrit       | M, corr, mse, mse_change                   | M
+#   stopTol        | number (see below)                         | n/4
+#   P_trans        | function_handle (see below)                | 
+#   maxIter        | positive integer (see below)               | n
+#   verbose        | true, false                                | false
+#   start_val      | vector of length m                         | zeros
+#
+#   Available stopping criteria :
+#               M           -   Extracts exactly M = stopTol elements.
+#               corr        -   Stops when maximum correlation between
+#                               residual and atoms is below stopTol value.
+#               mse         -   Stops when mean squared error of residual 
+#                               is below stopTol value.
+#               mse_change  -   Stops when the change in the mean squared 
+#                               error falls below stopTol value.
+#
+#   stopTol: Value for stopping criterion.
+#
+#   P_trans: If P is a function handle, then P_trans has to be specified and 
+#            must be a function handle. 
+#
+#   maxIter: Maximum number of allowed iterations.
+#
+#   verbose: Logical value to allow algorithm progress to be displayed.
+#
+#   start_val: Allows algorithms to start from partial solution.
+#
+###########################################################################
+# Outputs
+#    s              Solution vector 
+#    err_mse        Vector containing mse of approximation error for each 
+#                   iteration
+#    iter_time      Vector containing computation times for each iteration
+#
+###########################################################################
+# Description
+#   greed_omp_qr performs a greedy signal decomposition. 
+#   In each iteration a new element is selected depending on the inner
+#   product between the current residual and columns in P.
+#   The non-zero elements of s are approximated by orthogonally projecting 
+#   x onto the selected elements in each iteration.
+#   The algorithm uses QR decomposition.
+#
+# See Also
+#   greed_omp_chol, greed_omp_cg, greed_omp_cgp, greed_omp_pinv, 
+#   greed_omp_linsolve, greed_gp, greed_nomp
+#
+# Copyright (c) 2007 Thomas Blumensath
+#
+# The University of Edinburgh
+# Email: thomas.blumensath@ed.ac.uk
+# Comments and bug reports welcome
+#
+# This file is part of sparsity Version 0.1
+# Created: April 2007
+#
+# Part of this toolbox was developed with the support of EPSRC Grant
+# D000246/1
+#
+# Please read COPYRIGHT.m for terms and conditions.
+
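+# A minimal Python usage sketch (variable names here are illustrative only):
+# given a unit-norm dictionary A of shape (n, m) and a measurement vector x of
+# length n, a call mirroring the Matlab usage above could be
+#   opts = {'stopCrit': 'M', 'stopTol': k, 'nargout': 3}
+#   s, err_mse, iter_time = greed_omp_qr(x, A, m, opts)
+# where k is the number of atoms to select.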
+    ###########################################################################
+    #                    Default values and initialisation
+    ###########################################################################
+    #[n1 n2]=size(x);
+    #n1,n2 = x.shape
+    #if n2 == 1
+    #    n=n1;
+    #elseif n1 == 1
+    #    x=x';
+    #    n=n2;
+    #else
+    #   display('x must be a vector.');
+    #   return
+    #end
+    if x.ndim != 1:
+      print 'x must be a vector.'
+      return
+    n = x.size
+        
+    #sigsize     = x'*x/n;
+    sigsize     = np.vdot(x,x)/n
+    initial_given = 0
+    err_mse     = np.array([])
+    iter_time   = np.array([])
+    STOPCRIT    = 'M'
+    STOPTOL     = math.ceil(n/4.0)
+    MAXITER     = n
+    verbose     = False
+    s_initial   = np.zeros(m)
+    
+    if verbose:
+       print 'Initialising...'
+    #end
+    
+    ###########################################################################
+    #                           Output variables
+    ###########################################################################
+    #switch nargout 
+    #    case 3
+    #        comp_err=true;
+    #        comp_time=true;
+    #    case 2 
+    #        comp_err=true;
+    #        comp_time=false;
+    #    case 1
+    #        comp_err=false;
+    #        comp_time=false;
+    #    case 0
+    #        error('Please assign output variable.')
+    #    otherwise
+    #        error('Too many output arguments specified')
+    #end
+    if 'nargout' in opts:
+      if opts['nargout'] == 3:
+        comp_err  = True
+        comp_time = True
+      elif opts['nargout'] == 2:
+        comp_err  = True
+        comp_time = False
+      elif opts['nargout'] == 1:
+        comp_err  = False
+        comp_time = False
+      elif opts['nargout'] == 0:
+        print 'Please assign output variable.'
+        return
+      else:
+        print 'Too many output arguments specified'
+        return
+    else:
+        # If not given, make default nargout = 3
+        #  and add nargout to options
+        opts['nargout'] = 3
+        comp_err  = True
+        comp_time = True
+    
+    ###########################################################################
+    #                       Look through options
+    ###########################################################################
+    # Put option into nice format
+    #Options={};
+    #OS=nargin-3;
+    #c=1;
+    #for i=1:OS
+    #    if isa(varargin{i},'cell')
+    #        CellSize=length(varargin{i});
+    #        ThisCell=varargin{i};
+    #        for j=1:CellSize
+    #            Options{c}=ThisCell{j};
+    #            c=c+1;
+    #        end
+    #    else
+    #        Options{c}=varargin{i};
+    #        c=c+1;
+    #    end
+    #end
+    #OS=length(Options);
+    #if rem(OS,2)
+    #   error('Something is wrong with argument name and argument value pairs.') 
+    #end
+    #
+    #for i=1:2:OS
+    #   switch Options{i}
+    #        case {'stopCrit'}
+    #            if (strmatch(Options{i+1},{'M'; 'corr'; 'mse'; 'mse_change'},'exact'));
+    #                STOPCRIT    = Options{i+1};  
+    #            else error('stopCrit must be char string [M, corr, mse, mse_change]. Exiting.'); end 
+    #        case {'stopTol'}
+    #            if isa(Options{i+1},'numeric') ; STOPTOL     = Options{i+1};   
+    #            else error('stopTol must be number. Exiting.'); end
+    #        case {'P_trans'} 
+    #            if isa(Options{i+1},'function_handle'); Pt = Options{i+1};   
+    #            else error('P_trans must be function _handle. Exiting.'); end
+    #        case {'maxIter'}
+    #            if isa(Options{i+1},'numeric'); MAXITER     = Options{i+1};             
+    #            else error('maxIter must be a number. Exiting.'); end
+    #        case {'verbose'}
+    #            if isa(Options{i+1},'logical'); verbose     = Options{i+1};   
+    #            else error('verbose must be a logical. Exiting.'); end 
+    #        case {'start_val'}
+    #            if isa(Options{i+1},'numeric') & length(Options{i+1}) == m ;
+    #                s_initial     = Options{i+1};   
+    #                initial_given=1;
+    #            else error('start_val must be a vector of length m. Exiting.'); end
+    #        otherwise
+    #            error('Unrecognised option. Exiting.') 
+    #   end
+    #end
+    if 'stopCrit' in opts:
+        STOPCRIT = opts['stopCrit']
+    if 'stopTol' in opts:
+        if hasattr(opts['stopTol'], '__int__'):  # check if numeric
+            STOPTOL = opts['stopTol']
+        else:
+            raise TypeError('stopTol must be number. Exiting.')
+    if 'P_trans' in opts:
+        if hasattr(opts['P_trans'], '__call__'):  # check if function handle
+            Pt = opts['P_trans']
+        else:
+            raise TypeError('P_trans must be a function handle. Exiting.')
+    if 'maxIter' in opts:
+        if hasattr(opts['maxIter'], '__int__'):  # check if numeric
+            MAXITER = opts['maxIter']
+        else:
+            raise TypeError('maxIter must be a number. Exiting.')
+    if 'verbose' in opts:
+        # TODO: Should check here if is logical
+        verbose = opts['verbose']
+    if 'start_val' in opts:
+        # TODO: Should check here if is numeric
+        if opts['start_val'].size == m:
+            s_initial = opts['start_val']
+            initial_given = 1
+        else:
+            raise ValueError('start_val must be a vector of length m. Exiting.')
+    # Don't exit if unknown option is given, simply ignore it
+
+    #if strcmp(STOPCRIT,'M') 
+    #    maxM=STOPTOL;
+    #else
+    #    maxM=MAXITER;
+    #end
+    if STOPCRIT == 'M':
+        maxM = STOPTOL
+    else:
+        maxM = MAXITER
+    
+    #    if nargout >=2
+    #        err_mse = zeros(maxM,1);
+    #    end
+    #    if nargout ==3
+    #        iter_time = zeros(maxM,1);
+    #    end
+    if opts['nargout'] >= 2:
+        err_mse = np.zeros(maxM)
+    if opts['nargout'] == 3:
+        iter_time = np.zeros(maxM)
+    
+    ###########################################################################
+    #                        Make P and Pt functions
+    ###########################################################################
+    #if          isa(A,'float')      P =@(z) A*z;  Pt =@(z) A'*z;
+    #elseif      isobject(A)         P =@(z) A*z;  Pt =@(z) A'*z;
+    #elseif      isa(A,'function_handle') 
+    #    try
+    #        if          isa(Pt,'function_handle'); P=A;
+    #        else        error('If P is a function handle, Pt also needs to be a function handle. Exiting.'); end
+    #    catch error('If P is a function handle, Pt needs to be specified. Exiting.'); end
+    #else        error('P is of unsupported type. Use matrix, function_handle or object. Exiting.'); end
+    if hasattr(A, '__call__'):
+        if hasattr(Pt, '__call__'):
+            P = A
+        else:
+            raise TypeError('If P is a function handle, Pt also needs to be a function handle.')
+    else:
+        # TODO: should check here if A is matrix
+        P  = lambda z: np.dot(A,z)
+        Pt = lambda z: np.dot(A.T,z)
+        
+    ###########################################################################
+    #                 Random Check to see if dictionary is normalised 
+    ###########################################################################
+    #    mask=zeros(m,1);
+    #    mask(ceil(rand*m))=1;
+    #    nP=norm(P(mask));
+    #    if abs(1-nP)>1e-3;
+    #        display('Dictionary appears not to have unit norm columns.')
+    #    end
+    mask = np.zeros(m)
+    mask[int(math.floor(np.random.rand() * m))] = 1
+    nP = np.linalg.norm(P(mask))
+    if abs(1-nP) > 1e-3:
+        print 'Dictionary appears not to have unit norm columns.'
+    #end
+    
+    ###########################################################################
+    #              Check if we have enough memory and initialise 
+    ###########################################################################
+    #        try Q=zeros(n,maxM);
+    #        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
+    #        end 
+    #        try R=zeros(maxM);
+    #        catch error('Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.')
+    #        end
+    try:
+        Q = np.zeros((n,maxM))
+    except:
+        print 'Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.'
+        raise
+    try:
+        R = np.zeros((maxM, maxM))
+    except:
+        print 'Variable size is too large. Please try greed_omp_chol algorithm or reduce MAXITER.'
+        raise
+        
+    ###########################################################################
+    #                        Do we start from zero or not?
+    ###########################################################################
+    #if initial_given ==1;
+    #    IN          = find(s_initial);
+    #    if ~isempty(IN)
+    #        Residual    = x-P(s_initial);
+    #        lengthIN=length(IN);
+    #        z=[];
+    #        for k=1:length(IN)
+    #            # Extract new element
+    #             mask=zeros(m,1);
+    #             mask(IN(k))=1;
+    #             new_element=P(mask);
+    #
+    #            # Orthogonalise new element 
+    #             qP=Q(:,1:k-1)'*new_element;
+    #             q=new_element-Q(:,1:k-1)*(qP);
+    #
+    #             nq=norm(q);
+    #             q=q/nq;
+    #            # Update QR factorisation 
+    #             R(1:k-1,k)=qP;
+    #             R(k,k)=nq;
+    #             Q(:,k)=q;
+    #
+    #             z(k)=q'*x;
+    #        end
+    #        s           = s_initial;
+    #        Residual=x-Q(:,k)*z;
+    #        oldERR      = Residual'*Residual/n;
+    #    else
+    #    	IN          = [];
+    #        Residual    = x;
+    #        s           = s_initial;
+    #        sigsize     = x'*x/n;
+    #        oldERR      = sigsize;
+    #        k=0;
+    #        z=[];
+    #    end
+    #    
+    #else
+    #    IN          = [];
+    #    Residual    = x;
+    #    s           = s_initial;
+    #    sigsize     = x'*x/n;
+    #    oldERR      = sigsize;
+    #    k=0;
+    #    z=[];
+    #end
+    if initial_given == 1:
+        #IN = find(s_initial);
+        IN = np.nonzero(s_initial)[0].tolist()
+        #if ~isempty(IN)
+        if len(IN) > 0:
+            Residual = x - P(s_initial)
+            lengthIN = len(IN)
+            z = [0.0]*len(IN)   # list, filled as z[k] below and appended to in the main loop
+            #for k=1:length(IN)
+            for k in np.arange(len(IN)):
+                # Extract new element
+                 mask = np.zeros(m)
+                 mask[IN[k]] = 1
+                 new_element = P(mask)
+                 
+                 # Orthogonalise new element 
+                 #qP=Q(:,1:k-1)'*new_element;
+                 if k-1 >= 0:
+                     qP = np.dot(Q[:,0:k].T , new_element)
+                     #q=new_element-Q(:,1:k-1)*(qP);
+                     q = new_element - np.dot(Q[:,0:k] , qP)
+                     
+                     nq = np.linalg.norm(q)
+                     q = q / nq
+                     # Update QR factorisation 
+                     R[0:k,k] = qP
+                     R[k,k] = nq
+                     Q[:,k] = q
+                 else:
+                     q = new_element
+                     
+                     nq = np.linalg.norm(q)
+                     q = q / nq
+                     # Update QR factorisation 
+                     R[k,k] = nq
+                     Q[:,k] = q
+                     
+                 z[k] = np.dot(q.T , x)
+            #end
+            s        = s_initial.copy()
+            Residual = x - np.dot(Q[:,0:k+1] , z)   # project out all selected columns
+            oldERR   = np.vdot(Residual , Residual) / n
+        else:
+            #IN          = np.array([], dtype = int)
+            IN          = np.array([], dtype = int).tolist()
+            Residual    = x.copy()
+            s           = s_initial.copy()
+            sigsize     = np.vdot(x , x) / n
+            oldERR      = sigsize
+            k = 0
+            #z = np.array([])
+            z = []
+        #end
+        
+    else:
+        #IN          = np.array([], dtype = int)
+        IN          = np.array([], dtype = int).tolist()
+        Residual    = x.copy()
+        s           = s_initial.copy()
+        sigsize     = np.vdot(x , x) / n
+        oldERR      = sigsize
+        k = 0
+        #z = np.array([])
+        z = []
+    #end
+    
+    ###########################################################################
+    #                        Main algorithm
+    ###########################################################################
+    #    if verbose
+    #       display('Main iterations...') 
+    #    end
+    #    tic
+    #    t=0;
+    #    DR=Pt(Residual);
+    #    done = 0;
+    #    iter=1;
+    if verbose:
+       print 'Main iterations...'
+    tic = time.time()
+    t = 0
+    DR = Pt(Residual)
+    done = 0
+    iter = 1
+    
+    #while ~done
+    #    
+    #     # Select new element
+    #     DR(IN)=0;
+    #     # Nic: replace selection with random variable
+    #     # i.e. Randomized OMP!!
+    #     # DON'T FORGET ABOUT THIS!!
+    #     [v I]=max(abs(DR));
+    #     #I = randp(exp(abs(DR).^2 ./ (norms.^2)'), [1 1]);
+    #     IN=[IN I];
+    #
+    #    
+    #     k=k+1;
+    #     # Extract new element
+    #     mask=zeros(m,1);
+    #     mask(IN(k))=1;
+    #     new_element=P(mask);
+    #
+    #    # Orthogonalise new element 
+    #     qP=Q(:,1:k-1)'*new_element;
+    #     q=new_element-Q(:,1:k-1)*(qP);
+    #
+    #     nq=norm(q);
+    #     q=q/nq;
+    #    # Update QR factorisation 
+    #     R(1:k-1,k)=qP;
+    #     R(k,k)=nq;
+    #     Q(:,k)=q;
+    #
+    #     z(k)=q'*x;
+    #   
+    #    # New residual 
+    #     Residual=Residual-q*(z(k));
+    #     DR=Pt(Residual);
+    #     
+    #     ERR=Residual'*Residual/n;
+    #     if comp_err
+    #         err_mse(iter)=ERR;
+    #     end
+    #     
+    #     if comp_time
+    #         iter_time(iter)=toc;
+    #     end
+    #
+    ############################################################################
+    ##                        Are we done yet?
+    ############################################################################
+    #     
+    #     if strcmp(STOPCRIT,'M')
+    #         if iter >= STOPTOL
+    #             done =1;
+    #         elseif verbose && toc-t>10
+    #            display(sprintf('Iteration #i. --- #i iterations to go',iter ,STOPTOL-iter)) 
+    #            t=toc;
+    #         end
+    #    elseif strcmp(STOPCRIT,'mse')
+    #         if comp_err
+    #            if err_mse(iter)<STOPTOL;
+    #                done = 1; 
+    #            elseif verbose && toc-t>10
+    #                display(sprintf('Iteration #i. --- #i mse',iter ,err_mse(iter))) 
+    #                t=toc;
+    #            end
+    #         else
+    #             if ERR<STOPTOL;
+    #                done = 1; 
+    #             elseif verbose && toc-t>10
+    #                display(sprintf('Iteration #i. --- #i mse',iter ,ERR)) 
+    #                t=toc;
+    #             end
+    #         end
+    #     elseif strcmp(STOPCRIT,'mse_change') && iter >=2
+    #         if comp_err && iter >=2
+    #              if ((err_mse(iter-1)-err_mse(iter))/sigsize <STOPTOL);
+    #                done = 1; 
+    #             elseif verbose && toc-t>10
+    #                display(sprintf('Iteration #i. --- #i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
+    #                t=toc;
+    #             end
+    #         else
+    #             if ((oldERR - ERR)/sigsize < STOPTOL);
+    #                done = 1; 
+    #             elseif verbose && toc-t>10
+    #                display(sprintf('Iteration #i. --- #i mse change',iter ,(oldERR - ERR)/sigsize)) 
+    #                t=toc;
+    #             end
+    #         end
+    #     elseif strcmp(STOPCRIT,'corr') 
+    #          if max(abs(DR)) < STOPTOL;
+    #             done = 1; 
+    #          elseif verbose && toc-t>10
+    #                display(sprintf('Iteration #i. --- #i corr',iter ,max(abs(DR)))) 
+    #                t=toc;
+    #          end
+    #     end
+    #     
+    #    # Also stop if residual gets too small or maxIter reached
+    #     if comp_err
+    #         if err_mse(iter)<1e-16
+    #             display('Stopping. Exact signal representation found!')
+    #             done=1;
+    #         end
+    #     else
+    #
+    #
+    #         if iter>1
+    #             if ERR<1e-16
+    #                 display('Stopping. Exact signal representation found!')
+    #                 done=1;
+    #             end
+    #         end
+    #     end
+    #
+    #     if iter >= MAXITER
+    #         display('Stopping. Maximum number of iterations reached!')
+    #         done = 1; 
+    #     end
+    #     
+    ############################################################################
+    ##                    If not done, take another round
+    ############################################################################
+    #   
+    #     if ~done
+    #        iter=iter+1;
+    #        oldERR=ERR;
+    #     end
+    #end
+    while not done:
+         
+         # Select new element
+         DR[IN]=0
+         #[v I]=max(abs(DR));
+         #v = np.abs(DR).max()
+         I = np.abs(DR).argmax()
+         #IN = np.concatenate((IN,I))
+         IN.append(I)
+         
+         
+         #k = k + 1  # moved to the end of the loop, since indexing is zero-based
+         
+         # Extract new element
+         mask = np.zeros(m)
+         mask[IN[k]] = 1
+         new_element = P(mask)
+    
+         # Orthogonalise new element 
+         if k-1 >= 0:
+             qP = np.dot(Q[:,0:k].T , new_element)
+             q = new_element - np.dot(Q[:,0:k] , qP)
+             
+             nq = np.linalg.norm(q)
+             q = q/nq
+             # Update QR factorisation 
+             R[0:k,k] = qP
+             R[k,k] = nq
+             Q[:,k] = q                
+         else:
+             q = new_element
+             
+             nq = np.linalg.norm(q)
+             q = q/nq
+             # Update QR factorisation 
+             R[k,k] = nq
+             Q[:,k] = q
+
+         #z[k]=np.vdot(q , x)
+         z.append(np.vdot(q , x))
+       
+         # New residual 
+         Residual = Residual - q * (z[k])
+         DR = Pt(Residual)
+         
+         ERR = np.vdot(Residual , Residual) / n
+         if comp_err:
+             err_mse[iter-1] = ERR
+         #end
+         
+         if comp_time:
+             iter_time[iter-1] = time.time() - tic
+         #end
+         
+         ###########################################################################
+         #                        Are we done yet?
+         ###########################################################################
+         if STOPCRIT == 'M':
+             if iter >= STOPTOL:
+                 done = 1
+             elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                #display(sprintf('Iteration #i. --- #i iterations to go',iter ,STOPTOL-iter)) 
+                print 'Iteration '+str(iter)+'. --- '+str(STOPTOL-iter)+' iterations to go'
+                t = time.time()
+             #end
+         elif STOPCRIT =='mse':
+             if comp_err:
+                if err_mse[iter-1] < STOPTOL:
+                    done = 1 
+                elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                    #display(sprintf('Iteration #i. --- #i mse',iter ,err_mse(iter))) 
+                    print 'Iteration '+str(iter)+'. --- '+str(err_mse[iter-1])+' mse'
+                    t = time.time()
+                #end
+             else:
+                 if ERR < STOPTOL:
+                    done = 1 
+                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                    #display(sprintf('Iteration #i. --- #i mse',iter ,ERR)) 
+                    print 'Iteration '+str(iter)+'. --- '+str(ERR)+' mse'
+                    t = time.time()
+                 #end
+             #end
+         elif STOPCRIT == 'mse_change' and iter >=2:
+             if comp_err and iter >=2:
+                 if ((err_mse[iter-2] - err_mse[iter-1])/sigsize < STOPTOL):
+                    done = 1
+                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                    #display(sprintf('Iteration #i. --- #i mse change',iter ,(err_mse(iter-1)-err_mse(iter))/sigsize )) 
+                    print 'Iteration '+str(iter)+'. --- '+str((err_mse[iter-2]-err_mse[iter-1])/sigsize)+' mse change'
+                    t = time.time()
+                 #end
+             else:
+                 if ((oldERR - ERR)/sigsize < STOPTOL):
+                    done = 1
+                 elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                    #display(sprintf('Iteration #i. --- #i mse change',iter ,(oldERR - ERR)/sigsize)) 
+                    print 'Iteration '+str(iter)+'. --- '+str((oldERR - ERR)/sigsize)+' mse change'
+                    t = time.time()
+                 #end
+             #end
+         elif STOPCRIT == 'corr':
+              if np.abs(DR).max() < STOPTOL:
+                 done = 1 
+              elif verbose and time.time() - t > 10.0/1000: # time() returns sec
+                  #display(sprintf('Iteration #i. --- #i corr',iter ,max(abs(DR)))) 
+                  print 'Iteration '+str(iter)+'. --- '+str(np.abs(DR).max())+' corr'
+                  t = time.time()
+              #end
+          #end
+         
+         # Also stop if residual gets too small or maxIter reached
+         if comp_err:
+             if err_mse[iter-1] < 1e-14:
+                 done = 1
+                 # Nic: added verbose check
+                 if verbose:
+                     print 'Stopping. Exact signal representation found!'
+             #end
+         else:
+             if iter > 1:
+                 if ERR < 1e-14:
+                     done = 1 
+                     # Nic: added verbose check
+                     if verbose:
+                         print 'Stopping. Exact signal representation found!'
+                 #end
+             #end
+         #end
+         
+         
+         if iter >= MAXITER:
+             done = 1 
+             # Nic: added verbose check
+             if verbose:
+                 print 'Stopping. Maximum number of iterations reached!'
+         #end
+         
+         ###########################################################################
+         #                    If not done, take another round
+         ###########################################################################
+         if not done:
+            iter = iter + 1
+            oldERR = ERR
+         #end
+         
+         # Moved here from front, since we are 0-based         
+         k = k + 1
+    #end
+    
+    ###########################################################################
+    #            Now we can solve for s by back-substitution
+    ###########################################################################
+    #s(IN)=R(1:k,1:k)\z(1:k)';
+    s[IN] = scipy.linalg.solve(R[0:k,0:k] , np.array(z[0:k]))
+    
+    ###########################################################################
+    #                  Only return as many elements as iterations
+    ###########################################################################
+    if opts['nargout'] >= 2:
+        err_mse = err_mse[0:iter]
+    #end
+    if opts['nargout'] == 3:
+        iter_time = iter_time[0:iter]
+    #end
+    if verbose:
+       print 'Done'
+    #end
+    
+    # Return
+    if opts['nargout'] == 1:
+        return s
+    elif opts['nargout'] == 2:
+        return s, err_mse
+    elif opts['nargout'] == 3:
+        return s, err_mse, iter_time
+    
+    # Change history
+    #
+    # 8 February: Algorithm no longer stops if the dictionary is not normalised.
+
+    # End of greed_omp_qr() function
+    #--------------------------------
+    
+    
+def omp_qr(x, dict, D, natom, tolerance):
+    """ Recover x using QR implementation of OMP
+
+   Parameter
+   ---------
+   x: measurements
+   dict: dictionary
+   D: Gramian of dictionary
+   natom: iterations
+   tolerance: error tolerance
+
+   Return
+   ------
+   x_hat : estimate of x
+   gamma : indices where non-zero
+
+   For more information, see http://media.aau.dk/null_space_pursuits/2011/10/efficient-omp.html
+   """
+    msize, dictsize = dict.shape
+    normr2 = np.vdot(x,x)
+    normtol2 = tolerance*normr2
+    R = np.zeros((natom,natom))
+    Q = np.zeros((msize,natom))
+    gamma = []
+
+    # find initial projections
+    origprojections = np.dot(x.T,dict)
+    origprojectionsT = origprojections.T
+    projections = origprojections.copy();
+
+    k = 0
+    while (normr2 > normtol2) and (k < natom):
+      # find index of maximum magnitude projection
+      newgam = np.argmax(np.abs(projections ** 2))
+      gamma.append(newgam)
+      # update QR factorization, projections, and residual energy
+      if k == 0:
+        R[0,0] = 1
+        Q[:,0] = dict[:,newgam].copy()
+        # update projections
+        QtempQtempT = np.outer(Q[:,0],Q[:,0])
+        projections -= np.dot(x.T, np.dot(QtempQtempT,dict))
+        # update residual energy
+        normr2 -= np.vdot(x, np.dot(QtempQtempT,x))
+      else:
+        w = scipy.linalg.solve_triangular(R[0:k,0:k],D[gamma[0:k],newgam],trans=1)
+        R[k,k] = np.sqrt(1-np.vdot(w,w))
+        R[0:k,k] = w.copy()
+        Q[:,k] = (dict[:,newgam] - np.dot(QtempQtempT,dict[:,newgam]))/R[k,k]
+        QkQkT = np.outer(Q[:,k],Q[:,k])
+        xTQkQkT = np.dot(x.T,QkQkT)
+        QtempQtempT += QkQkT
+        # update projections
+        projections -= np.dot(xTQkQkT,dict)
+        # update residual energy
+        normr2 -= np.dot(xTQkQkT,x)
+
+      k += 1
+
+    # build solution
+    tempR = R[0:k,0:k]
+    w = scipy.linalg.solve_triangular(tempR,origprojectionsT[gamma[0:k]],trans=1)
+    x_hat = np.zeros((dictsize,1))
+    x_hat[gamma[0:k]] = scipy.linalg.solve_triangular(tempR,w)
+
+    return x_hat, gamma
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyCSalgos/OMP/omp_sk_bugfix.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,564 @@
+"""Orthogonal matching pursuit algorithms
+"""
+
+# Author: Vlad Niculae
+#
+# License: BSD Style.
+
+from warnings import warn
+
+import numpy as np
+from scipy import linalg
+from scipy.linalg.lapack import get_lapack_funcs
+
+#from .base import LinearModel
+#from ..utils.arrayfuncs import solve_triangular
+from sklearn.linear_model.base import LinearModel
+from sklearn.utils.arrayfuncs import solve_triangular
+
+premature = """ Orthogonal matching pursuit ended prematurely due to linear
+dependence in the dictionary. The requested precision might not have been met.
+"""
+
+
+def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True):
+    """Orthogonal Matching Pursuit step using the Cholesky decomposition.
+
+Parameters:
+-----------
+X: array, shape = (n_samples, n_features)
+Input dictionary. Columns are assumed to have unit norm.
+
+y: array, shape = (n_samples,)
+Input targets
+
+n_nonzero_coefs: int
+Targeted number of non-zero elements
+
+tol: float
+Targeted squared error, if not None overrides n_nonzero_coefs.
+
+copy_X: bool, optional
+Whether the design matrix X must be copied by the algorithm. A false
+value is only helpful if X is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+Returns:
+--------
+gamma: array, shape = (n_nonzero_coefs,)
+Non-zero elements of the solution
+
+idx: array, shape = (n_nonzero_coefs,)
+Indices of the positions of the elements in gamma within the solution
+vector
+
+"""
+    if copy_X:
+        X = X.copy('F')
+    else: # even if we are allowed to overwrite, still copy it if bad order
+        X = np.asfortranarray(X)
+
+    min_float = np.finfo(X.dtype).eps
+    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
+    potrs, = get_lapack_funcs(('potrs',), (X,))
+
+    alpha = np.dot(X.T, y)
+    residual = y
+    n_active = 0
+    indices = range(X.shape[1]) # keeping track of swapping
+
+    #max_features = X.shape[1] if tol is not None else n_nonzero_coefs
+    # Nic: tol not None should not override n_nonzero_coefs; the two act together
+    max_features = n_nonzero_coefs
+    
+    L = np.empty((max_features, max_features), dtype=X.dtype)
+    L[0, 0] = 1.
+
+    while True:
+        lam = np.argmax(np.abs(np.dot(X.T, residual)))
+        if lam < n_active or alpha[lam] ** 2 < min_float:
+            # atom already selected or inner product too small
+            warn(premature)
+            break
+        if n_active > 0:
+            # Updates the Cholesky decomposition of X' X
+            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
+            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
+            v = nrm2(L[n_active, :n_active]) ** 2
+            if 1 - v <= min_float: # selected atoms are dependent
+                warn(premature)
+                break
+            L[n_active, n_active] = np.sqrt(1 - v)
+        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
+        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
+        indices[n_active], indices[lam] = indices[lam], indices[n_active]
+        n_active += 1
+        # solves LL'x = y as a composition of two triangular systems
+        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
+                         overwrite_b=False)
+
+        residual = y - np.dot(X[:, :n_active], gamma)
+        if tol is not None and nrm2(residual) ** 2 <= tol:
+            break
+        #elif n_active == max_features:
+        # Nic: tol not None should not override n_nonzero_coefs; the two act together
+        if n_active == max_features:
+            break
+
+    return gamma, indices[:n_active]
+
+
+def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
+              copy_Gram=True, copy_Xy=True):
+    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
+
+This function uses the Cholesky decomposition method.
+
+Parameters:
+-----------
+Gram: array, shape = (n_features, n_features)
+Gram matrix of the input data matrix
+
+Xy: array, shape = (n_features,)
+Input targets
+
+n_nonzero_coefs: int
+Targeted number of non-zero elements
+
+tol_0: float
+Squared norm of y, required if tol is not None.
+
+tol: float
+Targeted squared error, if not None overrides n_nonzero_coefs.
+
+copy_Gram: bool, optional
+Whether the gram matrix must be copied by the algorithm. A false
+value is only helpful if it is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+copy_Xy: bool, optional
+Whether the covariance vector Xy must be copied by the algorithm.
+If False, it may be overwritten.
+
+Returns:
+--------
+gamma: array, shape = (n_nonzero_coefs,)
+Non-zero elements of the solution
+
+idx: array, shape = (n_nonzero_coefs,)
+Indices of the positions of the elements in gamma within the solution
+vector
+
+"""
+    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
+
+    if copy_Xy:
+        Xy = Xy.copy()
+
+    min_float = np.finfo(Gram.dtype).eps
+    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
+    potrs, = get_lapack_funcs(('potrs',), (Gram,))
+
+    indices = range(len(Gram)) # keeping track of swapping
+    alpha = Xy
+    tol_curr = tol_0
+    delta = 0
+    n_active = 0
+
+    max_features = len(Gram) if tol is not None else n_nonzero_coefs
+    L = np.empty((max_features, max_features), dtype=Gram.dtype)
+    L[0, 0] = 1.
+
+    while True:
+        lam = np.argmax(np.abs(alpha))
+        if lam < n_active or alpha[lam] ** 2 < min_float:
+            # selected same atom twice, or inner product too small
+            warn(premature)
+            break
+        if n_active > 0:
+            L[n_active, :n_active] = Gram[lam, :n_active]
+            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
+            v = nrm2(L[n_active, :n_active]) ** 2
+            if 1 - v <= min_float: # selected atoms are dependent
+                warn(premature)
+                break
+            L[n_active, n_active] = np.sqrt(1 - v)
+        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
+        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
+        indices[n_active], indices[lam] = indices[lam], indices[n_active]
+        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
+        n_active += 1
+        # solves LL'x = y as a composition of two triangular systems
+        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
+                         overwrite_b=False)
+
+        beta = np.dot(Gram[:, :n_active], gamma)
+        alpha = Xy - beta
+        if tol is not None:
+            tol_curr += delta
+            delta = np.inner(gamma, beta[:n_active])
+            tol_curr -= delta
+            if tol_curr <= tol:
+                break
+        elif n_active == max_features:
+            break
+
+    return gamma, indices[:n_active]
+
+
+def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute_gram=False,
+                  copy_X=True):
+    """Orthogonal Matching Pursuit (OMP)
+
+Solves n_targets Orthogonal Matching Pursuit problems.
+An instance of the problem has the form:
+
+When parametrized by the number of non-zero coefficients using
+`n_nonzero_coefs`:
+argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
+
+When parametrized by error using the parameter `tol`:
+argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
+
+Parameters
+----------
+X: array, shape = (n_samples, n_features)
+Input data. Columns are assumed to have unit norm.
+
+y: array, shape = (n_samples,) or (n_samples, n_targets)
+Input targets
+
+n_nonzero_coefs: int
+Desired number of non-zero entries in the solution. If None (by
+default) this value is set to 10% of n_features.
+
+tol: float
+Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
+
+Nic: tol does not override n_nonzero_coefs; the two conditions act jointly!
+
+precompute_gram: {True, False, 'auto'},
+Whether to perform precomputations. Improves performance when n_targets
+or n_samples is very large.
+
+copy_X: bool, optional
+Whether the design matrix X must be copied by the algorithm. A false
+value is only helpful if X is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+Returns
+-------
+coef: array, shape = (n_features,) or (n_features, n_targets)
+Coefficients of the OMP solution
+
+See also
+--------
+OrthogonalMatchingPursuit
+orthogonal_mp_gram
+lars_path
+decomposition.sparse_encode
+decomposition.sparse_encode_parallel
+
+Notes
+-----
+Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
+Matching pursuits with time-frequency dictionaries, IEEE Transactions on
+Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
+(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
+
+This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
+M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
+Matching Pursuit Technical Report - CS Technion, April 2008.
+http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
+
+"""
+    X, y = map(np.asanyarray, (X, y))
+    if y.ndim == 1:
+        y = y[:, np.newaxis]
+    if copy_X:
+        X = X.copy('F')
+        copy_X = False
+    else:
+        X = np.asfortranarray(X)
+    if y.shape[1] > 1: # subsequent targets will be affected
+        copy_X = True
+    if n_nonzero_coefs is None and tol is None:
+        n_nonzero_coefs = int(0.1 * X.shape[1])
+    if tol is not None and tol < 0:
+        raise ValueError("Epsilon cannot be negative")
+    if tol is None and n_nonzero_coefs <= 0:
+        raise ValueError("The number of atoms must be positive")
+    if tol is None and n_nonzero_coefs > X.shape[1]:
+        raise ValueError("The number of atoms cannot be more than the number \
+of features")
+    if precompute_gram == 'auto':
+        precompute_gram = X.shape[0] > X.shape[1]
+    if precompute_gram:
+        G = np.dot(X.T, X)
+        G = np.asfortranarray(G)
+        Xy = np.dot(X.T, y)
+        if tol is not None:
+            norms_squared = np.sum((y ** 2), axis=0)
+        else:
+            norms_squared = None
+        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
+                                  copy_Gram=copy_X, copy_Xy=False)
+
+    coef = np.zeros((X.shape[1], y.shape[1]))
+    for k in xrange(y.shape[1]):
+        x, idx = _cholesky_omp(X, y[:, k], n_nonzero_coefs, tol,
+                               copy_X=copy_X)
+        coef[idx, k] = x
+    return np.squeeze(coef)
+
+
+def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
+                       norms_squared=None, copy_Gram=True,
+                       copy_Xy=True):
+    """Gram Orthogonal Matching Pursuit (OMP)
+
+Solves n_targets Orthogonal Matching Pursuit problems using only
+the Gram matrix X.T * X and the product X.T * y.
+
+Parameters
+----------
+Gram: array, shape = (n_features, n_features)
+Gram matrix of the input data: X.T * X
+
+Xy: array, shape = (n_features,) or (n_features, n_targets)
+Input targets multiplied by X: X.T * y
+
+n_nonzero_coefs: int
+Desired number of non-zero entries in the solution. If None (by
+default) this value is set to 10% of n_features.
+
+tol: float
+Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
+
+norms_squared: array-like, shape = (n_targets,)
+Squared L2 norms of the lines of y. Required if tol is not None.
+
+copy_Gram: bool, optional
+Whether the gram matrix must be copied by the algorithm. A false
+value is only helpful if it is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+copy_Xy: bool, optional
+Whether the covariance vector Xy must be copied by the algorithm.
+If False, it may be overwritten.
+
+Returns
+-------
+coef: array, shape = (n_features,) or (n_features, n_targets)
+Coefficients of the OMP solution
+
+See also
+--------
+OrthogonalMatchingPursuit
+orthogonal_mp
+lars_path
+decomposition.sparse_encode
+decomposition.sparse_encode_parallel
+
+Notes
+-----
+Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
+Matching pursuits with time-frequency dictionaries, IEEE Transactions on
+Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
+(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
+
+This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
+M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
+Matching Pursuit Technical Report - CS Technion, April 2008.
+http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
+
+"""
+    Gram, Xy = map(np.asanyarray, (Gram, Xy))
+    if Xy.ndim == 1:
+        Xy = Xy[:, np.newaxis]
+        if tol is not None:
+            norms_squared = [norms_squared]
+
+    if n_nonzero_coefs is None and tol is None:
+        n_nonzero_coefs = int(0.1 * len(Gram))
+    if tol is not None and norms_squared is None:
+        raise ValueError('Gram OMP needs the precomputed norms in order \
+to evaluate the error sum of squares.')
+    if tol is not None and tol < 0:
+        raise ValueError("Epsilon cennot be negative")
+    if tol is None and n_nonzero_coefs <= 0:
+        raise ValueError("The number of atoms must be positive")
+    if tol is None and n_nonzero_coefs > len(Gram):
+        raise ValueError("The number of atoms cannot be more than the number \
+of features")
+    coef = np.zeros((len(Gram), Xy.shape[1]))
+    for k in range(Xy.shape[1]):
+        x, idx = _gram_omp(Gram, Xy[:, k], n_nonzero_coefs,
+                           norms_squared[k] if tol is not None else None, tol,
+                           copy_Gram=copy_Gram, copy_Xy=copy_Xy)
+        coef[idx, k] = x
+    return np.squeeze(coef)
+
+
+class OrthogonalMatchingPursuit(LinearModel):
+    """Orthogonal Mathching Pursuit model (OMP)
+
+Parameters
+----------
+n_nonzero_coefs: int, optional
+Desired number of non-zero entries in the solution. If None (by
+default) this value is set to 10% of n_features.
+
+tol: float, optional
+Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
+
+fit_intercept: boolean, optional
+whether to calculate the intercept for this model. If set
+to false, no intercept will be used in calculations
+(e.g. data is expected to be already centered).
+
+normalize: boolean, optional
+If False, the regressors X are assumed to be already normalized.
+
+precompute_gram: {True, False, 'auto'},
+Whether to use a precomputed Gram and Xy matrix to speed up
+calculations. Improves performance when `n_targets` or `n_samples` is
+very large. Note that if you already have such matrices, you can pass
+them directly to the fit method.
+
+copy_X: bool, optional
+Whether the design matrix X must be copied by the algorithm. A false
+value is only helpful if X is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+copy_Gram: bool, optional
+Whether the gram matrix must be copied by the algorithm. A false
+value is only helpful if X is already Fortran-ordered, otherwise a
+copy is made anyway.
+
+copy_Xy: bool, optional
+Whether the covariance vector Xy must be copied by the algorithm.
+If False, it may be overwritten.
+
+
+Attributes
+----------
+coef_: array, shape = (n_features,) or (n_features, n_targets)
+parameter vector (w in the formulation formula)
+
+intercept_: float or array, shape =(n_targets,)
+independent term in decision function.
+
+Notes
+-----
+Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
+Matching pursuits with time-frequency dictionaries, IEEE Transactions on
+Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
+(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
+
+This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
+M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
+Matching Pursuit Technical Report - CS Technion, April 2008.
+http://www.cs.technion.ac.il/~ronrubin/Publications/KSVX-OMP-v2.pdf
+
+See also
+--------
+orthogonal_mp
+orthogonal_mp_gram
+lars_path
+Lars
+LassoLars
+decomposition.sparse_encode
+decomposition.sparse_encode_parallel
+
+"""
+    def __init__(self, copy_X=True, copy_Gram=True,
+            copy_Xy=True, n_nonzero_coefs=None, tol=None,
+            fit_intercept=True, normalize=True, precompute_gram=False):
+        self.n_nonzero_coefs = n_nonzero_coefs
+        self.tol = tol
+        self.fit_intercept = fit_intercept
+        self.normalize = normalize
+        self.precompute_gram = precompute_gram
+        self.copy_Gram = copy_Gram
+        self.copy_Xy = copy_Xy
+        self.copy_X = copy_X
+
+    def fit(self, X, y, Gram=None, Xy=None):
+        """Fit the model using X, y as training data.
+
+Parameters
+----------
+X: array-like, shape = (n_samples, n_features)
+Training data.
+
+y: array-like, shape = (n_samples,) or (n_samples, n_targets)
+Target values.
+
+Gram: array-like, shape = (n_features, n_features) (optional)
+Gram matrix of the input data: X.T * X
+
+Xy: array-like, shape = (n_features,) or (n_features, n_targets)
+(optional)
+Input targets multiplied by X: X.T * y
+
+
+Returns
+-------
+self: object
+returns an instance of self.
+"""
+        X = np.atleast_2d(X)
+        y = np.atleast_1d(y)
+        n_features = X.shape[1]
+
+        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
+                                                        self.fit_intercept,
+                                                        self.normalize,
+                                                        self.copy_X)
+
+        if self.n_nonzero_coefs is None and self.tol is None:
+            self.n_nonzero_coefs = int(0.1 * n_features)
+
+        if Gram is not None:
+            Gram = np.atleast_2d(Gram)
+
+            if self.copy_Gram:
+                copy_Gram = False
+                Gram = Gram.copy('F')
+            else:
+                Gram = np.asfortranarray(Gram)
+
+            copy_Gram = self.copy_Gram
+            if y.shape[1] > 1: # subsequent targets will be affected
+                copy_Gram = True
+
+            if Xy is None:
+                Xy = np.dot(X.T, y)
+            else:
+                if self.copy_Xy:
+                    Xy = Xy.copy()
+                if self.normalize:
+                    if len(Xy.shape) == 1:
+                        Xy /= X_std
+                    else:
+                        Xy /= X_std[:, np.newaxis]
+
+            if self.normalize:
+                Gram /= X_std
+                Gram /= X_std[:, np.newaxis]
+
+            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
+            self.coef_ = orthogonal_mp_gram(Gram, Xy, self.n_nonzero_coefs,
+                                            self.tol, norms_sq,
+                                            copy_Gram, True).T
+        else:
+            precompute_gram = self.precompute_gram
+            if precompute_gram == 'auto':
+                precompute_gram = X.shape[0] > X.shape[1]
+            self.coef_ = orthogonal_mp(X, y, self.n_nonzero_coefs, self.tol,
+                                       precompute_gram=self.precompute_gram,
+                                       copy_X=self.copy_X).T
+
+        self._set_intercept(X_mean, y_mean, X_std)
+        return self
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/l1qc_test.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Oct 21 14:28:08 2011
+
+@author: Nic
+"""
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/omp_test.py	Fri Oct 21 13:53:49 2011 +0000
@@ -0,0 +1,157 @@
+""" 
+#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
+# Bob L. Sturm <bst@create.aau.dk> 20111018
+# Department of Architecture, Design and Media Technology
+# Aalborg University Copenhagen
+# Lautrupvang 15, 2750 Ballerup, Denmark
+#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
+"""
+
+import unittest
+
+import numpy as np
+from sklearn.utils import check_random_state
+import time
+
+from omp_sk_bugfix import orthogonal_mp
+from omp_QR import greed_omp_qr
+from omp_QR import omp_qr
+
+"""
+Run a problem suite involving sparse vectors in 
+ambientDimension dimensional space, with a resolution
+in the phase plane of numGradations x numGradations,
+and at each indeterminacy and sparsity pair run
+numTrials independent trials.
+
+Outputs a text file denoting successes at each phase point.
+For more on phase transitions, see:
+D. L. Donoho and J. Tanner, "Precise undersampling theorems," 
+Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010.
+"""
+
+class CompareResults(unittest.TestCase):
+    
+    def testCompareResults(self):
+      """OMP results should be almost the same with all implementations"""
+      ambientDimension = 400
+      numGradations = 30
+      numTrials = 1
+      runProblemSuite(ambientDimension,numGradations,numTrials, verbose=False) 
+
+
+
+def runProblemSuite(ambientDimension,numGradations,numTrials, verbose):
+
+  idx = np.arange(ambientDimension)
+  phaseDelta = np.linspace(0.05,1,numGradations)
+  phaseRho = np.linspace(0.05,1,numGradations)
+  success = np.zeros((numGradations, numGradations))
+  
+  #Nic: init timers
+  t1all = 0
+  t2all = 0
+  t3all = 0
+  
+  deltaCounter = 0
+  # delta is number of measurements/
+  for delta in phaseDelta[:17]:
+    rhoCounter = 0
+    for rho in phaseRho:
+      if verbose:
+          print(deltaCounter,rhoCounter)
+          
+      numMeasurements = int(delta*ambientDimension)
+      sparsity = int(rho*numMeasurements)
+      # how do I set the following to be random each time?
+      generator = check_random_state(100)
+      # create unit norm dictionary
+      D = generator.randn(numMeasurements, ambientDimension)
+      D /= np.sqrt(np.sum((D ** 2), axis=0))
+      # compute Gramian (for efficiency)
+      DTD = np.dot(D.T,D)
+  
+      successCounter = 0
+      trial = numTrials
+      while trial > 0:
+        # generate sparse signal with a minimum non-zero value
+        x = np.zeros((ambientDimension, 1))
+        idx2 = idx
+        generator.shuffle(idx2)
+        idx3 = idx2[:sparsity]
+        while np.min(np.abs(x[idx3,0])) < 1e-10 :
+           x[idx3,0] = generator.randn(sparsity)
+        # sense sparse signal
+        y = np.dot(D, x)
+        
+        # Nic: Use sparsify OMP function (translated from Matlab)
+        ompopts = dict({'stopCrit':'M', 'stopTol':2*sparsity})
+        starttime = time.time()                     # start timer
+        x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts)
+        t2all = t2all + time.time() - starttime     # stop timer
+        idx_r2 = np.nonzero(x_r2)[0]        
+        
+        # run to two times expected sparsity, or tolerance
+        # why? Often times, OMP can retrieve the correct solution
+        # when it is run for more than the expected sparsity
+        #x_r, idx_r = omp_qr(y,D,DTD,2*sparsity,1e-5)
+        # Nic: adjust tolerance to match with other function
+        starttime = time.time()                     # start timer
+        x_r, idx_r = omp_qr(y.copy(),D.copy(),DTD.copy(),2*sparsity,numMeasurements*1e-14/np.vdot(y,y))
+        t1all = t1all + time.time() - starttime     # stop timer        
+        
+        # Nic: test sklearn omp
+        starttime = time.time()                     # start timer
+        x_r3 = orthogonal_mp(D.copy(), y.copy(), 2*sparsity, tol=numMeasurements*1e-14, precompute_gram=False, copy_X=True)
+        idx_r3 = np.nonzero(x_r3)[0]
+        t3all = t3all + time.time() - starttime     # stop timer        
+        
+        # Nic: compare results
+        if verbose:
+            print 'diff1 = ',np.linalg.norm(x_r.squeeze() - x_r2.squeeze())
+            print 'diff2 = ',np.linalg.norm(x_r.squeeze() - x_r3.squeeze())
+            print 'diff3 = ',np.linalg.norm(x_r2.squeeze() - x_r3.squeeze())
+            print "Bob's total time = ", t1all
+            print "Nic's total time = ", t2all
+            print "Skl's total time = ", t3all
+        if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-6 or \
+           np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-6 or \
+           np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-6:
+               if verbose:
+                   print "STOP: Different results"
+                   print "Bob's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r).squeeze())
+                   print "Nic's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r2).squeeze())
+                   print "Skl's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r3).squeeze())
+               raise ValueError("Different results")
+  
+        # debias to remove small entries
+        for nn in idx_r:
+          if abs(x_r[nn]) < 1e-10:
+            x_r[nn] = 0
+  
+        # exact recovery condition using support
+        #if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)):
+        #  successCounter += 1
+        # exact recovery condition using error in solution
+        error = x - x_r
+        """ the following is the exact recovery condition in: A. Maleki 
+              and D. L. Donoho, "Optimally tuned iterative reconstruction 
+              algorithms for compressed sensing," IEEE J. Selected Topics 
+              in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """
+        if np.vdot(error,error) < np.vdot(x,x)*1e-4:
+          successCounter += 1
+        trial -= 1
+  
+      success[rhoCounter,deltaCounter] = successCounter
+      if successCounter == 0:
+        break
+  
+      rhoCounter += 1
+      #np.savetxt('test.txt',success,fmt='#2.1d',delimiter=',')
+    deltaCounter += 1
+    
+if __name__ == "__main__":
+    
+    unittest.main(verbosity=2)    
+    #suite = unittest.TestLoader().loadTestsFromTestCase(CompareResults)
+    #unittest.TextTestRunner(verbosity=2).run(suite)    
\ No newline at end of file