changeset 154:0de08f68256b ivand_dev

ALPS toolbox - Algebraic Pursuit added to smallbox
author Ivan Damnjanovic lnx <ivan.damnjanovic@eecs.qmul.ac.uk>
date Fri, 12 Aug 2011 11:17:47 +0100
parents af307f247ac7
children b14209313ba4
files examples/ALPS solvers tests/SMALL_solver_test_ALPS.m examples/AudioInpainting/Audio_Declipping_Example.m toolboxes/alps/ALPS/AlgebraicPursuit.m toolboxes/alps/ALPS/cgsolve.m toolboxes/alps/ALPS/configuration.m toolboxes/alps/ALPS/infty_ALPS.m toolboxes/alps/ALPS/l_ALPS.m toolboxes/alps/ALPS/one_ALPS.m toolboxes/alps/ALPS/thresh.m toolboxes/alps/ALPS/zero_ALPS.m toolboxes/alps/ALPSbenchmark_errornorm_vs_iter.m toolboxes/alps/ALPSdemo.m toolboxes/alps/README toolboxes/alps/Report_bibtex.bib toolboxes/alps/generate_matrix.m toolboxes/alps/generate_vector.m toolboxes/wrapper_ALPS_toolbox.m util/SMALL_plot.m util/SMALL_solve.m
diffstat 19 files changed, 2073 insertions(+), 18 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/examples/ALPS solvers tests/SMALL_solver_test_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,163 @@
+%%  Example test of solvers from different toolboxes on Sparco problem 6
+%
+%   The main purpose of this example is to show how to use the SMALL
+%   structure to solve SPARCO compressed sensing problems (1-11) and
+%   compare results from different solvers.
+%   To generate the SMALL.Problem part of the structure you can use the
+%   generateProblem function from the Sparco toolbox, giving the problem
+%   number and any additional parameters you might want to change.
+%   Alternatively, you might want to consult the Sparco documentation and
+%   write a problem yourself. There are four fields that must be specified
+%   in SMALL.Problem - A, b, sizeA and reconstruct.
+%   
+%   To generate SMALL.solver part of the structure you must specify three
+%   fields:
+%   
+%       SMALL.solver.toolbox - string with the toolbox name; it is needed
+%                              because different toolboxes call their solver
+%                              functions in different ways.
+%       SMALL.solver.name - string with the solver name (e.g. SolveOMP)
+%       SMALL.solver.param - string with the optional parameters for the
+%                            particular solver (all parameters you want to
+%                            specify except A, b and the size of the solution)
+%                            
+%   Every call to the SMALL_solve function will generate the following output:
+%
+%       SMALL.solver.solution - contains solution vector x
+%       SMALL.solver.reconstructed - vector containing signal reconstructed
+%                                    from the solution
+%       SMALL.solver.time - time that solver spent to find the solution
+%           
+%   The SMALL_plot function plots SMALL.solver.solution and the reconstructed
+%   signal against the original signal.
+%   
+%   In this particular example we are testing AlgebraicPursuit from ALPS,
+%   SMALL_cgp, SMALL_chol, SolveOMP from SparseLab and greed_pcgp from
+%   Sparsify against "PROB006 Daubechies basis, Gaussian ensemble
+%   measurement basis, piecewise cubic polynomial signal" from Sparco.
+%   
+%
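+%   As an illustration (using the same values as the calls further below in
+%   this script), a single solver entry could be set up and run as:
+%
+%       SMALL.solver(1) = SMALL_init_solver;
+%       SMALL.solver(1).toolbox = 'SMALL';
+%       SMALL.solver(1).name = 'SMALL_cgp';
+%       SMALL.solver(1).param = '200, 1e-14';
+%       SMALL.solver(1) = SMALL_solve(SMALL.Problem, SMALL.solver(1));
+%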
+
+
+%   Centre for Digital Music, Queen Mary, University of London.
+%   This file copyright 2009 Ivan Damnjanovic.
+%
+%   This program is free software; you can redistribute it and/or
+%   modify it under the terms of the GNU General Public License as
+%   published by the Free Software Foundation; either version 2 of the
+%   License, or (at your option) any later version.  See the file
+%   COPYING included with this distribution for more information.
+%%   
+
+fprintf('\n\nExample test of SMALL solvers against their counterparts on Sparco problems.\n\n');
+
+%%
+% Generate SPARCO problem 
+clear  
+
+SMALL.Problem = generateProblem(6, 'P', 6, 'm', 2*500,'n',2*1024, 'show');
+
+SMALL.Problem.A = opToMatrix(SMALL.Problem.A, 1);
+
+%%
+i=1;
+%%
+% ALPS test
+SMALL.solver(i) = SMALL_init_solver;
+SMALL.solver(i).toolbox = 'ALPS';
+SMALL.solver(i).name = 'AlgebraicPursuit';
+
+% In the following structure all parameters except the matrix, the
+% measurement vector and the size of the solution need to be specified. If
+% you are not sure which parameters are needed for a particular solver,
+% type "help <Solver name>" at the MATLAB command line.
+
+SMALL.solver(i).param=struct(...
+    'sparsity', 125,... 
+    'memory', 0,...
+    'mode', 1,...
+    'iternum', 50,... 
+    'tolerance', 1e-14);
+
+SMALL.solver(i)=SMALL_solve(SMALL.Problem,SMALL.solver(i));
+
+
+i=i+1;
+%%
+% SMALL Conjugate Gradient test 
+SMALL.solver(i)=SMALL_init_solver;
+SMALL.solver(i).toolbox='SMALL';    
+SMALL.solver(i).name='SMALL_cgp';
+
+% In the following string all parameters except the matrix, the measurement
+% vector and the size of the solution need to be specified. If you are not
+% sure which parameters are needed for a particular solver, type
+% "help <Solver name>" at the MATLAB command line.
+
+SMALL.solver(i).param='200, 1e-14';
+
+SMALL.solver(i)=SMALL_solve(SMALL.Problem,SMALL.solver(i));
+
+
+i=i+1;
+
+%%
+% SolveOMP from SparseLab test 
+
+SMALL.solver(i)=SMALL_init_solver;
+SMALL.solver(i).toolbox='SparseLab';  
+SMALL.solver(i).name='SolveOMP';
+
+% In the following string all parameters except the matrix, the measurement
+% vector and the size of the solution need to be specified. If you are not
+% sure which parameters are needed for a particular solver, type
+% "help <Solver name>" at the MATLAB command line.
+
+SMALL.solver(i).param='200, 0, 0, 0, 1e-14';
+
+SMALL.solver(i)=SMALL_solve(SMALL.Problem, SMALL.solver(i));
+
+i=i+1;
+  
+%%
+% SMALL OMP with Cholesky update test 
+SMALL.solver(i)=SMALL_init_solver;
+SMALL.solver(i).toolbox='SMALL';    
+SMALL.solver(i).name='SMALL_chol';
+
+% In the following string all parameters except the matrix, the measurement
+% vector and the size of the solution need to be specified. If you are not
+% sure which parameters are needed for a particular solver, type
+% "help <Solver name>" at the MATLAB command line.
+
+SMALL.solver(i).param='200, 1e-14';
+
+SMALL.solver(i)=SMALL_solve(SMALL.Problem, SMALL.solver(i));
+
+i=i+1;
+
+%%
+% greed_pcgp from Sparsify test 
+
+SMALL.solver(i)=SMALL_init_solver;
+SMALL.solver(i).toolbox='Sparsify';  
+SMALL.solver(i).name='greed_pcgp';
+
+% In the following string all parameters except the matrix, the measurement
+% vector and the size of the solution need to be specified. If you are not
+% sure which parameters are needed for a particular solver, type
+% "help <Solver name>" at the MATLAB command line.
+
+SMALL.solver(i).param='''stopCrit'', ''M'', ''stopTol'', 200';
+
+SMALL.solver(i)=SMALL_solve(SMALL.Problem, SMALL.solver(i));
+
+%%
+
+SMALL_plot(SMALL);
+  
+
+  
+ 
+
--- a/examples/AudioInpainting/Audio_Declipping_Example.m	Fri Jul 29 12:35:52 2011 +0100
+++ b/examples/AudioInpainting/Audio_Declipping_Example.m	Fri Aug 12 11:17:47 2011 +0100
@@ -49,7 +49,7 @@
 
 %   Defining the Problem structure
 
-SMALL.Problem = generateAudioDeclippingProblem('male01_8kHz.wav', 0.6, 256, 0.5, @wRect, @wSine, @wRect, @Gabor_Dictionary, 2);
+SMALL.Problem = generateAudioDeclippingProblem('', 0.6, 256, 0.5, @wRect, @wSine, @wRect, @Gabor_Dictionary, 2);
 
 for idxSolver = 1:4
     
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/AlgebraicPursuit.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,264 @@
+function [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, varargin)
+% =========================================================================
+%              Algebraic Pursuit algorithms - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithms are accelerated hard thresholding methods 
+% for sparse recovery in linear inverse systems. In particular, let
+%             Phi : M x N real matrix (M < N)
+%             x*  : N x 1 K-sparse data vector
+%             n   : N x 1 additive noise vector
+% then y = Phi x* + n is the undersampled M x 1 measurement vector. ALPS 
+% algorithms solve the following minimization problem 
+%          minimize ||y - Phi x||^2    subject to x being K-sparse. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                         M x 1 undersampled measurement vector.
+% Phi                       M x N regression matrix.
+% K                         Sparsity of underlying vector x* or desired
+%                           sparsity of solution.
+% varargin                  Set of parameters. These are:
+%
+%    memory,...             Memory (momentum) of proposed algorithm. 
+%                           Possible values are 0,1,'infty' for memoryless,
+%                           one memory and infinity memory ALPS,
+%                           respectively. Default value: memory = 0.
+%    tol,...                Early stopping tolerance. Default value: tol =
+%                           1e-5.
+%    ALPSiters,...          Maximum number of algorithm iterations. Default
+%                           value: 300.  
+%    mode, ...              According to [1], possible values are
+%                           [0,1,2,4,5,6]. This value comes from the binary 
+%                           representation of the parameters:
+%                           (solveNewtonb, gradientDescentx, solveNewtonx), 
+%                           which are explained next. Default value = 0.
+%    mu,...                 Variable that controls the step size selection. 
+%                           When mu = 0, step size is computed adaptively 
+%                           per iteration. Default value: mu = 0. 
+%    tau,...                Variable that controls the momentum in the
+%                           non-memoryless case. Ignored in the memoryless
+%                           case. A function handle can also be supplied
+%                           for tau; see the description below. Default
+%                           value: tau = 1/2. 
+%    CGiters,...            Maximum iterations for Conjugate-Gradients method.
+%    CGtol,...              Tolerance variable for Conjugate-Gradients method.
+%    verbose                verbose = 1 prints out execution information.
+% =========================================================================
+% DESCRIPTION OF MODE VARIABLE:
+%    solveNewtonb,...       If solveNewtonb == 1: Corresponds to solving a
+%                           Newton system restricted to a sparse support.
+%                           It is implemented via conjugate gradients. If
+%                           solveNewtonb == 0: Step size selection as
+%                           described in eqs. (12) and (13) in [1]. Default
+%                           value: solveNewtonb = 0. For infty memory case,
+%                           solveNewtonb = 0.
+%    gradientDescentx,...   If gradientDescentx == 1: single gradient
+%                           update of x_{i+1} restricted to its support
+%                           with line search. Default value:
+%                           gradientDescentx = 1.
+%    solveNewtonx,...       If solveNewtonx == 1: Akin to Hard Thresholding
+%                           Pursuit (c.f. Simon Foucart, "Hard Thresholding
+%                           Pursuit," preprint, 2010). Default value:
+%                           solveNewtonx = 0.
+% =========================================================================
+% DESCRIPTION OF SPECIAL TAU VALUES:
+%    tau = -1,...           Tau computed as the minimizer of the objective 
+%                           function:
+%
+%                                    <y - Phi*x_i, Phi*(x_i - x_{i-1})>
+%                              tau = ----------------------------------.
+%                                        ||Phi*(x_i - x_{i-1})||^2
+%
+%    tau = 0,...            Follows FISTA weight configuration at each
+%                           iteration. For more information, please read "A 
+%                           Fast Iterative Shrinkage-Thresholding Algorithm
+%                           for Linear Inverse Problems", written by A.
+%                           Beck and M. Teboulle, SIAM J. Imaging Sciences,
+%                           vol. 2, no. 1, 2009.
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                     N x 1 recovered K-sparse vector.
+% numiter                   Number of iterations executed.
+% time                      Execution time in seconds.
+% x_path                    Keeps a series of computed N x 1 K-sparse vectors 
+%                           until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+%                         Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant 
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like 
+% to acknowledge Rice University for his Faculty Fellowship.
+% =========================================================================
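+% USAGE SKETCH (illustrative only; Phi, y and the sparsity K are assumed to
+% be defined already, and the option names follow the list above):
+%
+%   [x_hat, numiter, time] = AlgebraicPursuit(y, Phi, K, 'memory', 0, ...
+%                                             'mode', 1, 'tolerance', 1e-6);
+% =========================================================================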
+
+x_hat = 0;
+time = 0;
+x_path = 0;
+
+params.memory = 0;
+params.tol = 1e-5;
+params.ALPSiters = 300;
+params.mode = 0;
+params.tau = 1/2;
+params.memory = 0;
+params.cg_maxiter = 50;
+params.cg_tol = 1e-8;
+params.useCG = 0;
+params.verbose = 0;
+params.mu = 0;
+
+for in_arg = 1:2:length(varargin)
+    if (strcmp(varargin{in_arg}, 'memory'))
+        params.memory = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'mode'))
+        params.mode = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'tolerance'))
+        params.tol = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'ALPSiterations'))
+        params.ALPSiters = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'tau'))
+        params.tau = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'mu'))
+        params.mu = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'useCG'))
+        params.useCG = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'CGiters'))
+        params.cg_maxiter = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'CGtol'))
+        params.cg_tol = varargin{in_arg+1};
+    end
+    if (strcmp(varargin{in_arg}, 'verbose'))
+        params.verbose = varargin{in_arg+1};
+    end;
+end;
+
+%% Check input
+
+if (ischar(params.memory))
+    if (~strcmp(params.memory, 'infty'))
+        disp('ERROR: Memory variable takes non-negative integer values or the string "infty".');
+        numiter = -1;
+        return;
+    end;
+elseif (params.memory < 0 || mod(params.memory, 1) ~= 0)
+    disp('ERROR: Memory variable takes non-negative integer values or the string "infty".');
+    numiter = -1;
+    return;
+end;
+
+if (params.mode ~= 0 && params.mode ~= 1 && params.mode ~= 2 && params.mode ~= 4 && params.mode ~= 5 && params.mode ~= 6)
+    disp('ERROR: Mode variable takes values from the set [0,1,2,4,5,6].');
+    numiter = -1;
+    return;
+end;
+
+if (~isnumeric(params.tol) || params.tol < 0)
+    disp('ERROR: tolerance variable must be positive.');
+    numiter = -1;
+    return;
+end;
+
+
+if (params.mode == 0 || params.mode == 2)
+    params.useCG = 0;
+end;
+
+y = y(:);
+
+M_y = length(y);
+[M_Phi, tmpArg] = size(Phi);
+
+if (params.mode == 4 || params.mode == 5 || params.mode == 6)
+    if (M_Phi < 2*K) 
+        disp('WARNING: Newton system may be ill-conditioned... Press any key to continue');
+        pause;
+    end;
+end;
+
+[params.solveNewtonb, params.gradientDescentx, params.solveNewtonx] = configuration(params.mode);
+
+if (M_y ~= M_Phi)
+    error('Inconsistent sampling matrix...');
+end;
+
+switch params.memory
+    case 0,        
+        tic;
+        [x_hat, numiter, x_path] = zero_ALPS(y, Phi, K, params);
+        time = toc;
+        if (params.verbose == 1)
+            str = sprintf('%d-ALPS(%d) time: %s', params.memory, params.mode, num2str(time));
+            disp(str);
+            str = sprintf('Number of iterations: %d', numiter);
+            disp(str);
+        end;
+    case 1,
+        tic;
+        [x_hat, numiter, x_path] = one_ALPS(y, Phi, K, params);
+        time = toc;
+        if (params.verbose == 1)
+            if (isa(params.tau,'float'))
+                if (params.tau == 0)
+                    str = sprintf('%d-ALPS(%d) - fista tau - time: %s', params.memory, params.mode, num2str(time));
+                elseif (params.tau == -1)
+                    str = sprintf('%d-ALPS(%d) - optimized tau - time: %s', params.memory, params.mode, num2str(time));
+                else
+                    str = sprintf('%d-ALPS(%d) - tau = %.3f - time: %s', params.memory, params.mode, params.tau, num2str(time));
+                end;
+            elseif (isa(params.tau, 'function_handle'))
+                str = sprintf('%d-ALPS(%d) - user defined tau - time: %s', params.memory, params.mode, num2str(time));
+            end;
+            disp(str);
+            str = sprintf('Number of iterations: %d', numiter);
+            disp(str);
+        end;   
+    case 'infty',
+        tic;
+        [x_hat, numiter, x_path] = infty_ALPS(y, Phi, K, params);
+        time = toc;
+        if (params.verbose == 1)
+            if (isa(params.tau,'float'))
+                if (params.tau == -1)
+                    str = sprintf('infty-ALPS(%d) - optimized tau - time: %s', params.mode, num2str(time));
+                else
+                    str = sprintf('infty-ALPS(%d) - tau = %.3f - time: %s', params.mode, params.tau, num2str(time));
+                end;
+            elseif (isa(params.tau,'function_handle'))
+                str = sprintf('infty-ALPS(%d) - user defined tau - time: %s', params.mode, num2str(time));
+            end;
+            disp(str);
+            str = sprintf('Number of iterations: %d', numiter);
+            disp(str);   
+        end;        
+    otherwise
+        tic;
+        [x_hat, numiter, x_path] = l_ALPS(y, Phi, K, params);
+        time = toc;
+        if (params.verbose == 1)
+            if (isa(params.tau,'float'))
+                if (params.tau == -1)
+                    str = sprintf('%d-ALPS(%d) - optimized tau - time: %s', params.memory, params.mode, num2str(time));
+                else
+                    str = sprintf('%d-ALPS(%d) - tau = %.3f - time: %s', params.memory, params.mode, params.tau, num2str(time));
+                end;
+            elseif (isa(params.tau,'function_handle'))
+                str = sprintf('%d-ALPS(%d) - user defined tau - time: %s', params.memory, params.mode, num2str(time));
+            end;
+            disp(str);
+            str = sprintf('Number of iterations: %d', numiter);
+            disp(str);        
+        end;
+end;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/cgsolve.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,76 @@
+% cgsolve.m
+%
+% Solve a symmetric positive definite system Ax = b via conjugate gradients.
+%
+% Usage: [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+%
+% A - Either an NxN matrix, or a function handle.
+%
+% b - N vector
+%
+% tol - Desired precision.  Algorithm terminates when 
+%    norm(Ax-b)/norm(b) < tol .
+%
+% maxiter - Maximum number of iterations.
+%
+% verbose - If 0, do not print out progress messages.
+%    Default = 1.
+%
+% Written by: Justin Romberg, Caltech
+% Email: jrom@acm.caltech.edu
+% Created: October 2005
+%
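+% Example (illustrative sketch with an arbitrary small SPD system):
+%   A = [4 1; 1 3]; b = [1; 2];
+%   [x, res, iter] = cgsolve(A, b, 1e-8, 100, 0);
+%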
+
+function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
+
+if (nargin < 5), verbose = 1; end
+
+implicit = isa(A,'function_handle');
+
+x = zeros(length(b),1);
+r = b;
+d = r;
+delta = r'*r;
+delta0 = b'*b;
+numiter = 0;
+bestx = x;
+bestres = sqrt(delta/delta0); 
+while ((numiter < maxiter) & (delta > tol^2*delta0))
+
+  % q = A*d
+  if (implicit), q = A(d);  else, q = A*d;  end
+ 
+  alpha = delta/(d'*q);
+  x = x + alpha*d;
+  
+  if (mod(numiter+1,50) == 0)
+    % r = b - Aux*x
+    if (implicit), r = b - A(x);  else, r = b - A*x;  end
+  else
+    r = r - alpha*q;
+  end
+  
+  deltaold = delta;
+  delta = r'*r;
+  beta = delta/deltaold;
+  d = r + beta*d;
+  numiter = numiter + 1;
+  if (sqrt(delta/delta0) < bestres)
+    bestx = x;
+    bestres = sqrt(delta/delta0);
+  end    
+  
+  if ((verbose) & (mod(numiter,50)==0))
+    disp(sprintf('cg: Iter = %d, Best residual = %8.3e, Current residual = %8.3e', ...
+      numiter, bestres, sqrt(delta/delta0)));
+  end
+  
+end
+
+if (verbose)
+  disp(sprintf('cg: Iterations = %d, best residual = %14.8e', numiter, bestres));
+end
+x = bestx;
+res = bestres;
+iter = numiter;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/configuration.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,28 @@
+function [solveNewtonb, gradientDescentx, solveNewtonx] = configuration(mode)
+% Function that maps 'mode' value into (SolveNewtonb, GradientDescentx, SolveNewtonx) configuration
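+%
+% The triple is read as the binary expansion of 'mode', i.e.
+% mode = 4*solveNewtonb + 2*gradientDescentx + solveNewtonx, which is why
+% only the values [0,1,2,4,5,6] are meaningful here.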
+
+if (mode == 0)
+    solveNewtonb = 0;
+    gradientDescentx = 0;
+    solveNewtonx = 0;
+elseif (mode == 1)
+    solveNewtonb = 0;
+    gradientDescentx = 0;
+    solveNewtonx = 1;
+elseif (mode == 2)
+    solveNewtonb = 0;
+    gradientDescentx = 1;
+    solveNewtonx = 0;
+elseif (mode == 4)
+    solveNewtonb = 1;
+    gradientDescentx = 0;
+    solveNewtonx = 0;
+elseif (mode == 5)
+    solveNewtonb = 1;
+    gradientDescentx = 0;
+    solveNewtonx = 1;
+elseif (mode == 6)
+    solveNewtonb = 1;
+    gradientDescentx = 1;
+    solveNewtonx = 0;
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/infty_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,256 @@
+function [x_hat, numiter, x_path] = infty_ALPS(y, Phi, K, params)
+% =========================================================================
+%               infty-ALPS(#) algorithm - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithm with infty-memory acceleration. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                         M x 1 undersampled measurement vector.
+% Phi                       M x N regression matrix.
+% K                         Sparsity of underlying vector x* or desired
+%                           sparsity of solution.
+% params                    Structure of parameters. These are:
+%
+%    tol,...                Early stopping tolerance. Default value: tol =
+%                           1e-5.
+%    ALPSiters,...          Maximum number of algorithm iterations. Default
+%                           value: 300. 
+%    solveNewtonb,...       Value: solveNewtonb = 0. Not used in infty
+%                           methods.
+%    gradientDescentx,...   If gradientDescentx == 1: single gradient
+%                           update of x_{i+1} restricted to its support with 
+%                           line search. Default value: gradientDescentx =
+%                           1.
+%    solveNewtonx,...       If solveNewtonx == 1: Akin to Hard Thresholding Pursuit
+%                           (c.f. Simon Foucart, "Hard Thresholding Pursuit," 
+%                           preprint, 2010). Default value: solveNewtonx = 0.
+%    tau,...                Variable that controls the momentum in
+%                           non-memoryless case. Ignored in memoryless
+%                           case. Default value: tau = 1/2.
+%                           Special cases:
+%                               - tau = -1: momentum step size is automatically
+%                               optimized in every step.
+%                               - tau as a function handle: user defined
+%                               behavior of tau momentum term.
+%    mu,...                 Variable that controls the step size selection. 
+%                           When mu = 0, step size is computed adaptively 
+%                           per iteration. Default value: mu = 0. 
+%    cg_maxiter,...         Maximum iterations for Conjugate-Gradients method.
+%    cg_tol                 Tolerance variable for Conjugate-Gradients method.    
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                     N x 1 recovered K-sparse vector.
+% numiter                   Number of iterations executed.
+% x_path                    Keeps a series of computed N x 1 K-sparse vectors 
+%                           until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+%                         Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant 
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like 
+% to acknowledge Rice University for his Faculty Fellowship.
+% =========================================================================
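+% NOTE: this function is normally invoked through AlgebraicPursuit.m (with
+% 'memory' set to 'infty'), which assembles the params structure from its
+% name/value options.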
+
+[~,N] = size(Phi);
+
+%% Initialize transpose of measurement matrix
+
+Phi_t = Phi';
+
+%% Initialize to zero vector
+x_cur = zeros(N,1);
+y_cur = zeros(N,1);
+X_i = [];
+
+x_path = zeros(N, params.ALPSiters);
+
+%% CG params
+if (params.solveNewtonx == 1 || params.solveNewtonb == 1)
+    cg_verbose = 0;
+    cg_A = Phi_t*Phi;
+    cg_b = Phi_t*y;
+end;
+
+%% Determine momentum step size selection strategy
+optimizeTau = 0;
+function_tau = 0;
+
+if (isa(params.tau,'float'))
+    if (params.tau == -1)
+        optimizeTau = 1;
+    end;
+elseif (isa(params.tau, 'function_handle'))
+    function_tau = 1;
+end;
+
+%% Determine step size selection strategy
+function_mu = 0;
+adaptive_mu = 0;
+
+if (isa(params.mu,'float'))
+    function_mu = 0;
+    if (params.mu == 0)
+        adaptive_mu = 1;
+    else
+        adaptive_mu = 0;
+    end;
+elseif (isa(params.mu,'function_handle'))
+    function_mu = 1;
+end;
+
+%% Help variables
+complementary_Xi = ones(N,1);
+setXi = zeros(N,1);
+setYi = zeros(N,1);
+
+i = 1;
+%% infty-ALPS(#)
+while (i <= params.ALPSiters)
+    x_path(:,i) = x_cur;
+    x_prev = x_cur;
+    
+    % Compute the residual
+    if (i == 1)
+        res = y;
+    else
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+        res = y - Phi_x_cur;
+    end;
+    
+    % Compute the derivative
+    der = Phi_t*res;
+    
+    % Determine S_i set via eq. (11)
+    complementary_Xi(X_i) = 0;    
+    [~, ind_der] = sort(abs(der).*complementary_Xi, 'descend');
+    complementary_Xi(X_i) = 1;    
+    S_i = [X_i; ind_der(1:K)];    
+    
+    ider = der(S_i);    
+    
+    setder = zeros(N,1);
+    setder(S_i) = 1;
+    if (adaptive_mu)
+        % Step size selection via eq. (12) and eq. (13)        
+        Pder = Phi(:,S_i)*ider;
+        mu_bar = ider'*ider/(Pder'*Pder);        
+    end;
+    
+    iy_cur = y_cur.*setYi;    
+    if (~function_tau)      % If tau is not a function handle...
+        if (optimizeTau)    % Compute optimized tau
+            
+            % tau = argmin || u - Phi(x_i + y_i) ||
+            %     = <Phi*y_i, u - Phi(x_i - mu/2 * grad_Si f(xi))>/||Phi*y_i||^2
+            
+            if (i == 1)
+                params.tau = 0;
+            else
+                % u - Phi*(x_i - mu/2 grad_Si f(xi)) = u - Phi*b
+                if (adaptive_mu)
+                    b = x_cur(S_i) + mu_bar*ider;        % Non-zero elems: S_i
+                elseif (function_mu)
+                    b = x_cur(S_i) + params.mu(i)*ider;
+                else b = x_cur(S_i) + params.mu*ider;
+                end;
+                
+                y_Phi_b = y - Phi(:,S_i)*b;               
+                Phi_y_prev = Phi(:,Y_i)*y_cur(Y_i);     % Phi * y_i
+                params.tau = y_Phi_b'*Phi_y_prev/(Phi_y_prev'*Phi_y_prev);
+            end;            
+            
+            if (adaptive_mu)
+                y_cur = params.tau*iy_cur + mu_bar*der.*setder;
+            elseif (function_mu)
+                y_cur = params.tau*iy_cur + params.mu(i)*der.*setder;
+            else y_cur = params.tau*iy_cur + params.mu*der.*setder;
+            end;
+            
+            Y_i = ne(y_cur,0);
+            setYi = zeros(N,1);
+            setYi(Y_i) = 1;
+        else    % Tau fixed and user-defined
+            if (adaptive_mu)
+                y_cur = params.tau*iy_cur + mu_bar*der.*setder;
+            elseif (function_mu)
+                y_cur = params.tau*iy_cur + params.mu(i)*der.*setder;
+            else y_cur = params.tau*iy_cur + params.mu*der.*setder;
+            end;
+            
+            Y_i = ne(y_cur,0);
+            setYi = zeros(N,1);
+            setYi(Y_i) = 1;
+        end;
+    else
+        if (adaptive_mu)
+            y_cur = params.tau(i)*iy_cur + mu_bar*der.*setder;
+        elseif (function_mu)
+            y_cur = params.tau(i)*iy_cur + params.mu(i)*der.*setder;
+        else y_cur = params.tau(i)*iy_cur + params.mu*der.*setder;
+        end;
+        
+        Y_i = ne(y_cur,0);
+        setYi = zeros(N,1);
+        setYi(Y_i) = 1;
+    end;
+       
+    % Hard-threshold b and compute X_{i+1}
+    set_Xi_Yi = setXi + setYi;
+    ind_Xi_Yi = find(set_Xi_Yi > 0);
+    z = x_cur(ind_Xi_Yi) + y_cur(ind_Xi_Yi);
+    [~, ind_z] = sort(abs(z), 'descend');
+    x_cur = zeros(N,1);
+    x_cur(ind_Xi_Yi(ind_z(1:K))) = z(ind_z(1:K));
+    X_i = ind_Xi_Yi(ind_z(1:K));
+    
+    setXi = zeros(N,1);
+    setXi(X_i) = 1;
+    
+    if (params.gradientDescentx == 1)
+        % Calculate gradient of estimated vector x_cur
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);        
+        res = y - Phi_x_cur;
+        der = Phi_t*res;
+        
+        ider = der(X_i);
+
+        if (adaptive_mu)
+            Pder = Phi(:,X_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            x_cur(X_i) = x_cur(X_i) + mu_bar*ider;
+        elseif (function_mu)
+            x_cur(X_i) = x_cur(X_i) + params.mu(i)*ider;
+        else x_cur(X_i) = x_cur(X_i) + params.mu*ider;
+        end;
+    elseif (params.solveNewtonx == 1)                
+        % Similar to HTP
+        if (params.useCG == 1)
+            [v, ~, ~] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            v = cg_A(X_i,X_i)\cg_b(X_i);
+        end;
+        x_cur(X_i) = v;
+    end;
+   
+    % Test stopping criterion
+    if (i > 1) && (norm(x_cur - x_prev) < params.tol*norm(x_cur))
+        break;
+    end;
+    i = i + 1;
+end;
+
+x_hat = x_cur;
+numiter = i;
+
+if (i > params.ALPSiters)
+    x_path = x_path(:,1:numiter-1);
+else
+    x_path = x_path(:,1:numiter);
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/l_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,267 @@
+function [x_hat, numiter, x_path] = l_ALPS(y, Phi, K, params)
+% =========================================================================
+%                l-ALPS(#) algorithm - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithm with l-memory acceleration. 
+% 
+% Detailed discussion on the algorithm can be found in section III in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                         M x 1 undersampled measurement vector.
+% Phi                       M x N regression matrix.
+% K                         Sparsity of underlying vector x* or desired
+%                           sparsity of solution.
+% params                    Structure of parameters. These are:
+%
+%    tol,...                Early stopping tolerance. Default value: tol =
+%                           1e-5.
+%    ALPSiters,...          Maximum number of algorithm iterations. Default
+%                           value: 300. 
+%    solveNewtonb,...       If solveNewtonb == 1: Corresponds to solving a 
+%                           Newton system restricted to a sparse support. 
+%                           It is implemented via conjugate gradients.  
+%                           If solveNewtonb == 0: Step size selection as described
+%                           in eqs. (12) and (13) in [1]. 
+%                           Default value: solveNewtonb = 0.
+%    gradientDescentx,...   If gradientDescentx == 1: single gradient
+%                           update of x_{i+1} restricted to its support with 
+%                           line search. Default value: gradientDescentx =
+%                           1.
+%    solveNewtonx,...       If solveNewtonx == 1: Akin to Hard Thresholding Pursuit
+%                           (c.f. Simon Foucart, "Hard Thresholding Pursuit," 
+%                           preprint, 2010). Default value: solveNewtonx = 0.
+%    tau,...                Variable that controls the momentum in
+%                           non-memoryless case. Ignored in memoryless
+%                           case. Default value: tau = 1/2.
+%                           Special cases:                               
+%                               - tau = -1: momentum step size is automatically
+%                               optimized in every step.
+%                               - tau as a function handle: user defined
+%                               behavior of tau momentum term.
+%    mu,...                 Variable that controls the step size selection. 
+%                           When mu = 0, step size is computed adaptively 
+%                           per iteration. Default value: mu = 0. 
+%    cg_maxiter,...         Maximum iterations for Conjugate-Gradients method.
+%    cg_tol                 Tolerance variable for Conjugate-Gradients method.  
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                     N x 1 recovered K-sparse vector.
+% numiter                   Number of iterations executed.
+% x_path                    Keeps a series of computed N x 1 K-sparse vectors 
+%                           until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+%                         Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant 
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like 
+% to acknowledge Rice University for his Faculty Fellowship.
+% =========================================================================
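+% NOTE: this function is normally invoked through AlgebraicPursuit.m (with
+% 'memory' set to an integer l > 1), which assembles the params structure
+% from its name/value options.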
+
+[M, N] = size(Phi);
+
+%% Initialize transpose of measurement matrix
+
+Phi_t = Phi';
+
+%% Initialize to zero vector
+y_cur = zeros(N,1);
+Y_i = [];
+
+x_path = zeros(N, params.ALPSiters);
+
+%% CG params
+if (params.solveNewtonx == 1 || params.solveNewtonb == 1)
+    cg_verbose = 0;
+    cg_A = Phi_t*Phi;
+    cg_b = Phi_t*y;
+end;
+
+%% Determine momentum step size selection strategy
+optimizeTau = 0;
+function_tau = 0;
+
+if (isa(params.tau,'float'))
+    if (params.tau == -1)
+        optimizeTau = 1;
+    end;
+elseif (isa(params.tau, 'function_handle'))
+    function_tau = 1;
+end;
+
+%% Determine step size selection strategy
+function_mu = 0;
+adaptive_mu = 0;
+
+if (isa(params.mu,'float'))
+    function_mu = 0;
+    if (params.mu == 0)
+        adaptive_mu = 1;
+    else
+        adaptive_mu = 0;
+    end;
+elseif (isa(params.mu,'function_handle'))
+    function_mu = 1;
+end;
+
+%% Help variables
+complementary_Yi = ones(N,1);
+x = zeros(N,params.memory + 1);
+Phi_x = zeros(M,params.memory + 1);
+tau_candidates = zeros(params.memory,1);
+y_candidates = zeros(N,params.memory);
+residual_energy = zeros(params.memory,1);
+
+i = 1;
+%% l-ALPS(#)
+while (i <= params.ALPSiters)
+    x_path(:,i) = x(:,end);
+    
+    for k = 1:params.memory
+        x(:,k) = x(:,k+1);    
+        Phi_x(:,k) = Phi_x(:,k+1);
+    end;
+        
+    % Compute the residual             
+    if (i == 1)
+        res = y;
+    else         
+        Phi_y_cur = Phi(:,Y_i)*y_cur(Y_i);
+        res = y - Phi_y_cur;
+    end;
+    
+    % Compute the derivative
+    der = Phi_t*res;
+    
+    % Determine S_i via eq. (11) (change of variable from x_i to y_i)
+    complementary_Yi(Y_i) = 0;
+    [tmpArg, ind_der] = sort(abs(der).*complementary_Yi, 'descend');
+    complementary_Yi(Y_i) = 1;
+    S_i = [Y_i; ind_der(1:K)];       
+
+    ider = der(S_i);
+    if (params.solveNewtonb == 1)
+        % Compute least squares solution of the system A*y = (A*A)x using CG                        
+        if (params.useCG == 1)
+            [b, tmpArg, tmpArg] = cgsolve(cg_A(S_i, S_i), cg_b(S_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else 
+            b = cg_A(S_i,S_i)\cg_b(S_i);
+        end;
+    else
+        % Step size selection via eq. (12) and eq. (13) (change of variable from x_i to y_i)
+        if (adaptive_mu)
+            Pder = Phi(:,S_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);            
+            b = y_cur(S_i) + mu_bar*ider;
+        elseif (function_mu)
+            b = y_cur(S_i) + params.mu(i)*ider;
+        else b = y_cur(S_i) + params.mu*ider;
+        end;
+    end;
+
+    % Hard-threshold b and compute X_{i+1}
+    [tmpArg, ind_b] = sort(abs(b), 'descend');
+    x(:,end) = zeros(N,1);
+    x(S_i(ind_b(1:K)),end) = b(ind_b(1:K));
+    X_i = S_i(ind_b(1:K));
+    
+    if (params.gradientDescentx == 1)
+        % Calculate gradient of estimated current x vector
+        Phi_x_cur = Phi(:,X_i)*x(X_i,end);
+        res = y - Phi_x_cur;
+        der = Phi_t*res;
+        ider = der(X_i);
+        
+        if (adaptive_mu)
+            Pder = Phi(:,X_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            x(X_i,end) = x(X_i,end) + mu_bar*ider;
+        elseif (function_mu)
+            x(X_i,end) = x(X_i,end) + params.mu(i)*ider;
+        else x(X_i,end) = x(X_i,end) + params.mu*ider;
+        end;
+    elseif (params.solveNewtonx == 1)                
+        % Similar to HTP
+        if (params.useCG == 1)
+            [v, tmpArg, tmpArg] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            v = cg_A(X_i, X_i)\cg_b(X_i);
+        end;
+        x(X_i,end) = v;    
+    end;
+
+    Phi_x(:,end) = Phi(:,X_i)*x(X_i,end);
+    res = y - Phi_x(:,end);
+    if (~function_tau)    % If tau is not a function handle...        
+        if (optimizeTau)   % Compute optimized tau
+            if (i > params.memory)
+                
+                % tau = argmin ||u - Phi*y_{i+1}|| 
+                %     = <res, Phi*(x_cur - x_prev)>/||Phi*(x_cur - x_prev)||^2        
+
+                for k = 1:params.memory            
+                    Phi_diff = Phi_x(:,end) - Phi_x(:,k);
+                    tau_candidates(k) = res'*Phi_diff/(Phi_diff'*Phi_diff);
+                    y_candidates(:,k) = x(:,end) + tau_candidates(k)*(x(:,end) - x(:,k));
+                    residual_energy(k) = norm(y - Phi*y_candidates(:,k));                    
+                end;
+
+                [tmpArg,min_ind] = min(residual_energy);
+                y_cur = y_candidates(:,min_ind);
+                Y_i = find(ne(y_cur,0));
+            else
+                Phi_diff = Phi_x(:,end) - Phi_x(:,end-1);
+                params.tau = res'*Phi_diff/(Phi_diff'*Phi_diff);
+                y_cur = x(:,end) + params.tau*(x(:,end) - x(:,end-1));
+                Y_i = find(ne(y_cur,0));
+            end;
+        else
+            if (i > params.memory)
+                for k = 1:params.memory            
+                    y_candidates(:,k) = x(:,end) + params.tau*(x(:,end) - x(:,k));
+                    residual_energy(k) = norm(y - Phi*y_candidates(:,k));
+                end;
+                
+                [tmpArg,min_ind] = min(residual_energy);
+                y_cur = y_candidates(:,min_ind);
+                Y_i = find(ne(y_cur,0));
+            else                
+                y_cur = x(:,end) + params.tau*(x(:,end) - x(:,end-1));
+                Y_i = find(ne(y_cur,0));
+            end;
+        end;
+    else
+        if (i > params.memory)
+            for k = 1:params.memory            
+                y_candidates(:,k) = x(:,end) + params.tau(i)*(x(:,end) - x(:,k));
+                residual_energy(k) = norm(y - Phi*y_candidates(:,k))^2;
+            end;
+
+            [tmpArg,min_ind] = min(residual_energy);
+            y_cur = y_candidates(:,min_ind);
+            Y_i = find(ne(y_cur,0));
+        else                
+            y_cur = x(:,end) + params.tau(i)*(x(:,end) - x(:,end-1));
+            Y_i = find(ne(y_cur,0));
+        end;
+    end;
+    
+    % Test stopping criterion
+    if (i > 1) && (norm(x(:,end) - x(:,end-1)) < params.tol*norm(x(:,end)))
+        break;
+    end;                       
+    i = i + 1;            
+end;
+
+x_hat = x(:,end);
+numiter = i;
+
+if (i > params.ALPSiters)
+    x_path = x_path(:,1:numiter-1);
+else
+    x_path = x_path(:,1:numiter);
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/one_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,252 @@
+function [x_hat, numiter, x_path] = one_ALPS(y, Phi, K, params)
+% =========================================================================
+%                  1-ALPS(#) algorithm - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithm with 1-memory acceleration. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                         M x 1 undersampled measurement vector.
+% Phi                       M x N regression matrix.
+% K                         Sparsity of underlying vector x* or desired
+%                           sparsity of solution.
+% params                    Structure of parameters. These are:
+%
+%    tol,...                Early stopping tolerance. Default value: tol =
+%                           1e-5.
+%    ALPSiters,...          Maximum number of algorithm iterations. Default
+%                           value: 300. 
+%    solveNewtonb,...       If solveNewtonb == 1: Corresponds to solving a 
+%                           Newton system restricted to a sparse support. 
+%                           It is implemented via conjugate gradients.  
+%                           If solveNewtonb == 0: Step size selection as described
+%                           in eqs. (12) and (13) in [1]. 
+%                           Default value: solveNewtonb = 0.
+%    gradientDescentx,...   If gradientDescentx == 1: single gradient
+%                           update of x_{i+1} restricted to its support with 
+%                           line search. Default value: gradientDescentx =
+%                           1.
+%    solveNewtonx,...       If solveNewtonx == 1: Akin to Hard Thresholding Pursuit
+%                           (c.f. Simon Foucart, "Hard Thresholding Pursuit," 
+%                           preprint, 2010). Default value: solveNewtonx = 0.
+%    tau,...                Variable that controls the momentum in
+%                           non-memoryless case. Ignored in memoryless
+%                           case. Default value: tau = 1/2.
+%                           Special cases:
+%                               - tau = 0: momentum step size selection is
+%                               driven by the following formulas:
+%                                   a_1 = 1;
+%                                   a_{i+1} = (1 + \sqrt(1 + 4*a_i^2))/2;
+%                                   tau = (a_i - 1)/(a_{i+1});
+%                               described in [2] "A fast iterative
+%                               shrinkage-thresholding algorithm for linear
+%                               inverse problems", Beck A., and Teboulle M.
+%                               - tau = -1: momentum step size is automatically
+%                               optimized in every step.
+%                               - tau as a function handle: user defined
+%                               behavior of tau momentum term.
+%    mu,...                 Variable that controls the step size selection. 
+%                           When mu = 0, step size is computed adaptively 
+%                           per iteration. Default value: mu = 0. 
+%    cg_maxiter,...         Maximum iterations for Conjugate-Gradients method.
+%    cg_tol                 Tolerance variable for Conjugate-Gradients method.  
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                     N x 1 recovered K-sparse vector.
+% numiter                   Number of iterations executed.
+% x_path                    Keeps a series of computed N x 1 K-sparse vectors 
+%                           until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+%                         Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant 
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like 
+% to acknowledge Rice University for his Faculty Fellowship.
+% =========================================================================
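+% NOTE: this function is normally invoked through AlgebraicPursuit.m (with
+% 'memory' set to 1), which assembles the params structure from its
+% name/value options.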
+
+[M,N] = size(Phi);
+
+%% Initialize transpose of measurement matrix
+
+Phi_t = Phi';
+
+%% Initialize to zero vector
+x_cur = zeros(N,1);
+y_cur = zeros(N,1);
+Phi_x_cur = zeros(M,1);
+Y_i = [];
+
+x_path = zeros(N, params.ALPSiters);
+
+%% CG params
+if (params.solveNewtonx == 1 || params.solveNewtonb == 1)
+    cg_verbose = 0;
+    cg_A = Phi_t*Phi;
+    cg_b = Phi_t*y;
+end;
+
+%% Determine momentum step size selection strategy
+fista = 0;
+optimizeTau = 0;
+a_prev = 1;
+function_tau = 0;
+
+if (isa(params.tau,'float'))
+    function_tau = 0;
+    if (params.tau == 0)
+        fista = 1;
+        optimizeTau = 0;
+    elseif (params.tau == -1)
+        optimizeTau = 1;
+        fista = 0;
+    end;
+elseif (isa(params.tau, 'function_handle'))
+    function_tau = 1;
+end;
+
+%% Determine step size selection strategy
+function_mu = 0;
+adaptive_mu = 0;
+
+if (isa(params.mu,'float'))
+    function_mu = 0;
+    if (params.mu == 0)
+        adaptive_mu = 1;
+    else
+        adaptive_mu = 0;
+    end;
+elseif (isa(params.mu,'function_handle'))
+    function_mu = 1;
+end;
+
+%% Help variables
+complementary_Yi = ones(N,1);
+
+i = 1;
+%% 1-ALPS(#)
+while (i <= params.ALPSiters)
+    x_path(:,i) = x_cur;
+    x_prev = x_cur;        
+    
+    % Compute the residual
+    if (i == 1)
+        res = y;
+        % Compute the derivative
+        der = Phi_t*res;
+    else
+        % Compute the derivative
+        if (optimizeTau)
+            res = y - Phi_x_cur - params.tau*Phi_diff;
+        else
+            res = y - Phi(:,Y_i)*y_cur(Y_i);
+        end;
+        der = Phi_t*res;
+    end;
+    
+    Phi_x_prev = Phi_x_cur;               
+    
+    % Determine S_i set via eq. (11) (change of variable from x_i to y_i)
+    complementary_Yi(Y_i) = 0;
+    [tmpArg, ind_der] = sort(abs(der).*complementary_Yi, 'descend');  
+    complementary_Yi(Y_i) = 1;
+    S_i = [Y_i; ind_der(1:K)];
+        
+    ider = der(S_i);
+    if (params.solveNewtonb == 1)     
+        % Compute least squares solution of the system A*y = (A*A)x using CG                        
+        if (params.useCG == 1)
+            [b, tmpArg, tmpArg] = cgsolve(cg_A(S_i, S_i), cg_b(S_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else 
+            b = cg_A(S_i,S_i)\cg_b(S_i);
+        end;
+    else
+        % Step size selection via eq. (12) and eq. (13) (change of variable from x_i to y_i)
+        if (adaptive_mu)
+            Pder = Phi(:,S_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);            
+            b = y_cur(S_i) + (mu_bar)*ider;        
+        elseif (function_mu)
+            b = y_cur(S_i) + params.mu(i)*ider;
+        else b = y_cur(S_i) + params.mu*ider;
+        end;
+    end;
+
+    % Hard-threshold b and compute X_{i+1}
+    [tmpArg, ind_b] = sort(abs(b), 'descend');
+    x_cur = zeros(N,1);
+    x_cur(S_i(ind_b(1:K))) = b(ind_b(1:K));
+    X_i = S_i(ind_b(1:K));
+
+    if (params.gradientDescentx == 1)
+        % Calculate gradient of estimated vector x_cur
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+        res = y - Phi_x_cur;
+        der = Phi_t*res;
+        ider = der(X_i);
+        
+        if (adaptive_mu)
+            Pder = Phi(:,X_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            x_cur(X_i) = x_cur(X_i) + mu_bar*ider;
+        elseif (function_mu)
+            x_cur(X_i) = x_cur(X_i) + params.mu(i)*ider;
+        else x_cur(X_i) = x_cur(X_i) + params.mu*ider;
+        end;
+    elseif (params.solveNewtonx == 1)                
+        % Similar to HTP
+        if (params.useCG == 1)
+            [v, tmpArg, tmpArg] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            v = cg_A(X_i, X_i)\cg_b(X_i);
+        end;
+        x_cur(X_i) = v;
+    end;
+    
+    if (~function_tau)    % If tau is not a function handle...
+        if (fista)        % Fista configuration
+            a_cur = (1 + sqrt(1 + 4*a_prev^2))/2;
+            params.tau = (a_prev - 1)/a_cur;
+            a_prev = a_cur;
+        elseif (optimizeTau)   % Compute optimized tau
+
+            % tau = argmin ||u - Phi*y_{i+1}|| 
+            %     = <res, Phi*(x_cur - x_prev)>/||Phi*(x_cur - x_prev)||^2        
+
+            Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+            res = y - Phi_x_cur;
+            if (i == 1)
+                Phi_diff = Phi_x_cur;
+            else
+                Phi_diff = Phi_x_cur - Phi_x_prev;
+            end;
+            params.tau = res'*Phi_diff/(Phi_diff'*Phi_diff);
+        end;
+
+        y_cur = x_cur + params.tau*(x_cur - x_prev);
+        Y_i = find(ne(y_cur, 0));
+    else
+        y_cur = x_cur + params.tau(i)*(x_cur - x_prev);
+        Y_i = find(ne(y_cur, 0));
+    end;
+        
+    % Test stopping criterion
+    if (i > 1) && (norm(x_cur - x_prev) < params.tol*norm(x_cur))
+        break;
+    end;
+    i = i + 1;
+end;
+
+x_hat = x_cur;
+numiter= i;
+
+if (i > params.ALPSiters)
+    x_path = x_path(:,1:numiter-1);
+else
+    x_path = x_path(:,1:numiter);
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/thresh.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,4 @@
+function x= thresh(x, K)
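+% Hard-thresholding operator: keep the K entries of x with largest magnitude
+% and set the remaining entries to zero.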
+
+[~, ind] = sort(abs(x), 'descend');
+x(ind(K+1:end))= 0;
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/zero_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,189 @@
+function [x_hat, numiter, x_path] = zero_ALPS(y, Phi, K, params)
+% =========================================================================
+%                0-ALPS(#) algorithm - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithm with memoryless acceleration. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                         M x 1 undersampled measurement vector.
+% Phi                       M x N regression matrix.
+% K                         Sparsity of underlying vector x* or desired
+%                           sparsity of solution.
+% params                    Structure of parameters. These are:
+%
+%    tol,...                Early stopping tolerance. Default value: tol =
+%                           1e-5.
+%    ALPSiters,...          Maximum number of algorithm iterations. Default
+%                           value: 300. 
+%    mode, ...              According to [1], possible values are
+%                           [0,1,2,4,5,6]. This value comes from the binary 
+%                           representation of the parameters:
+%                           (solveNewtonb, gradientDescentx, solveNewtonx), 
+%                           which are explained next. Default value = 0.
+%    solveNewtonb,...       If solveNewtonb == 1: Corresponds to solving a 
+%                           Newton system restricted to a sparse support. 
+%                           It is implemented via conjugate gradients.  
+%                           If solveNewtonb == 0: Step size selection as described
+%                           in eqs. (12) and (13) in [1]. 
+%                           Default value: solveNewtonb = 0.
+%    gradientDescentx,...   If gradientDescentx == 1: single gradient
+%                           update of x_{i+1} restricted to its support with 
+%                           line search. Default value: gradientDescentx =
+%                           1.
+%    solveNewtonx,...       If solveNewtonx == 1: Akin to Hard Thresholding Pursuit
+%                           (c.f. Simon Foucart, "Hard Thresholding Pursuit," 
+%                           preprint, 2010). Default value: solveNewtonx = 0.
+%    mu,...                 Variable that controls the step size selection. 
+%                           When mu = 0, step size is computed adaptively 
+%                           per iteration. Default value: mu = 0. 
+%    cg_maxiter,...         Maximum iterations for Conjugate-Gradients method.
+%    cg_tol                 Tolerance variable for Conjugate-Gradients method.                                
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                     N x 1 recovered K-sparse vector.
+% numiter                   Number of iterations executed.
+% x_path                    Keeps a series of computed N x 1 K-sparse vectors 
+%                           until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+%                         Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant 
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like 
+% to acknowledge Rice University for his Faculty Fellowship.
+% =========================================================================
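+% NOTE: this function is normally invoked through AlgebraicPursuit.m (with
+% 'memory' set to 0), which assembles the params structure from its
+% name/value options.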
+
+[tmpArg, N] = size(Phi);
+
+%% Initialize transpose of measurement matrix
+
+Phi_t = Phi';
+
+%% Initialize to zero vector
+x_cur = zeros(N,1);
+X_i = [];
+
+x_path = zeros(N, params.ALPSiters);
+
+%% CG params
+if (params.mode == 1 || params.mode == 4 || params.mode == 5 || params.mode == 6)
+    cg_verbose = 0;
+    cg_A = Phi_t*Phi;
+    cg_b = Phi_t*y;
+end;
+
+%% Determine step size selection strategy
+function_mu = 0;
+adaptive_mu = 0;
+
+if (isa(params.mu,'float'))
+    function_mu = 0;
+    if (params.mu == 0)
+        adaptive_mu = 1;
+    else
+        adaptive_mu = 0;
+    end;
+elseif (isa(params.mu,'function_handle'))
+    function_mu = 1;
+end;
+
+%% Help variables
+complementary_Xi = ones(N,1);
+
+i = 1;
+%% 0-ALPS(#)
+while (i <= params.ALPSiters)
+    x_path(:,i) = x_cur;
+    x_prev = x_cur;
+
+    % Compute the residual
+    if (i == 1)
+        res = y;
+    else         
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+        res = y - Phi_x_cur;
+    end;
+    
+    % Compute the derivative
+    der = Phi_t*res;         
+    
+    % Determine S_i set via eq. (11)
+    complementary_Xi(X_i) = 0;
+    [tmpArg, ind_der] = sort(abs(der).*complementary_Xi, 'descend');
+    complementary_Xi(X_i) = 1;
+    S_i = [X_i; ind_der(1:K)];
+           
+    ider = der(S_i);
+    if (params.solveNewtonb == 1)
+        % Solve the Newton system restricted to S_i: (Phi'*Phi)(S_i,S_i)*b = (Phi'*y)(S_i)
+        if (params.useCG == 1)
+            [b, tmpArg, tmpArg] = cgsolve(cg_A(S_i, S_i), cg_b(S_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            b = cg_A(S_i,S_i)\cg_b(S_i);
+        end;        
+    else
+        % Step size selection via eq. (12) and eq. (13) 
+        if (adaptive_mu)
+            Pder = Phi(:,S_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            b = x_cur(S_i) + (mu_bar)*ider;    
+        elseif (function_mu)            
+            b = x_cur(S_i) + params.mu(i)*ider;
+        else
+            b = x_cur(S_i) + params.mu*ider;
+        end;
+    end;
+
+    % Hard-threshold b and compute X_{i+1}
+    [tmpArg, ind_b] = sort(abs(b), 'descend');
+    x_cur = zeros(N,1);
+    x_cur(S_i(ind_b(1:K))) = b(ind_b(1:K));
+    X_i = S_i(ind_b(1:K));
+
+    if (params.gradientDescentx == 1)
+        % Calculate gradient of estimated vector x_cur
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+        res = y - Phi_x_cur;
+        der = Phi_t*res;
+        ider = der(X_i);
+        
+        if (adaptive_mu)                
+            Pder = Phi(:,X_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            x_cur(X_i) = x_cur(X_i) + mu_bar*ider;
+        elseif (function_mu)
+            x_cur(X_i) = x_cur(X_i) + params.mu(i)*ider;
+        else
+            x_cur(X_i) = x_cur(X_i) + params.mu*ider;
+        end;
+    elseif (params.solveNewtonx == 1)                
+        % Similar to HTP
+        if (params.useCG == 1)
+            [v, tmpArg, tmpArg] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            v = cg_A(X_i,X_i)\cg_b(X_i);
+        end;
+        x_cur(X_i) = v;
+    end;
+     
+    % Test stopping criterion
+    if (i > 1) && (norm(x_cur - x_prev) < params.tol*norm(x_cur))
+        break;
+    end;
+    i = i + 1;  
+      
+end;
+
+x_hat = x_cur;
+numiter = i;
+
+if (i > params.ALPSiters)
+    x_path = x_path(:,1:numiter-1);
+else
+    x_path = x_path(:,1:numiter);
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPSbenchmark_errornorm_vs_iter.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,228 @@
+% =========================================================================
+%                   Algebraic Pursuit algorithms - Benchmark
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithms are accelerated hard thresholding methods 
+% for sparse recovery in linear inverse problems. In particular, let
+%             Phi : M x N real matrix (M < N)
+%             x*  : N x 1 K-sparse data vector
+%             n   : N x 1 additive noise vector
+% then, y = Phi x* + n is the undersampled M x 1 measurement vector. ALPS 
+% solves the following minimization problem 
+%          minimize ||y - Phi x||^2    subject to x being K-sparse. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+
+clear all
+close all
+clc;
+
+addpath ALPS/;
+
+% General parameters
+N = 4000;                       % Size of sparse vector x
+M = 800;                        % Number of measurements
+K = ceil(M*(0.2));              % Sparsity of  x*
+sigma = 0.0000;                 % Additive noise standard deviation
+verbose = 1;                    % Print information
+
+% ALPS parameters
+its = 600;                      % Maximum number of iterations
+Ttol= 1e-6;                     % Convergence tolerance
+tau = -1;                       % Momentum term
+mu = 0;                         % Step size selection
+MC = 10;                        % Number of Monte-Carlo iterations
+useCG = 0;                      % Use Conjugate-Gradients solver (0 = direct solve)
+
+% Please refer to generate_matrix.m and generate_vector.m files
+m_ensemble = 'Gaussian';        % Measurement matrix ensemble type
+x_ensemble = 'Gaussian';        % Sparse vector ensemble type
+
+max_its = 0;
+
+for i = 1:MC
+    x = generate_vector(N, K, x_ensemble);
+    Phi = generate_matrix(M, N, m_ensemble);
+    noise = sigma*randn(M,1);
+    y = Phi*x + noise;
+    
+    % 0-ALPS(0)    
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 0, 'mode', 0, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(1, i) = numiter;    
+    error(1,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(1, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;    
+    
+    % 0-ALPS(2)    
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 0, 'mode', 2, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(2, i) = numiter;    
+    error(2,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(2, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 0-ALPS(4)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 0, 'mode', 4, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(3, i) = numiter;    
+    error(3,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(3, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 0-ALPS(5)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 0, 'mode', 5, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(4, i) = numiter;    
+    error(4,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(4, i) = time;    
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 1-ALPS(2)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 1, 'mode', 2, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(5, i) = numiter;    
+    error(5,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(5, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 1-ALPS(4)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 1, 'mode', 4, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(6, i) = numiter;    
+    error(6,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(6, i) = time;    
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 1-ALPS(5)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 1, 'mode', 5, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(7, i) = numiter;    
+    error(7,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(7,i) = time;    
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 2-ALPS(2)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 2, 'mode', 2, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(8, i) = numiter;    
+    error(8,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(8, i) = time;
+        
+    if (numiter > max_its)
+        max_its = numiter;
+    end;        
+      
+    % 2-ALPS(4)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 2, 'mode', 4, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(9, i) = numiter;    
+    error(9,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(9, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 2-ALPS(5)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 2, 'mode', 5, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(10, i) = numiter;    
+    error(10,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(10, i) = time;
+    
+    if (numiter > max_its)
+        max_its = numiter;
+    end;
+    
+    % 3-ALPS(2)
+    disp('=====================================================================');
+    [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', 3, 'mode', 2, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+    num_iter(11, i) = numiter;    
+    error(11,i,1:size(x_path,2))=sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+    total_time(11, i) = time;
+        
+    if (numiter > max_its)
+        max_its = numiter;
+    end;    
+end;
+
+set(0,'DefaultAxesFontSize',12);
+    
+figure; 
+semilogy(squeeze(median(error(1,:,:),2)),'LineWidth',3); hold on
+semilogy(squeeze(median(error(2,:,:),2)),'--','LineWidth',3); 
+semilogy(squeeze(median(error(3,:,:),2)),':','LineWidth',3);
+semilogy(squeeze(median(error(4,:,:),2)),'k','LineWidth',4);
+semilogy(squeeze(median(error(5,:,:),2)),'k--','LineWidth',4);
+semilogy(squeeze(median(error(6,:,:),2)),'k:','LineWidth',4);
+semilogy(squeeze(median(error(7,:,:),2)),'k-.','LineWidth',4);
+semilogy(squeeze(median(error(8,:,:),2)),'r--','LineWidth',3);
+semilogy(squeeze(median(error(9,:,:),2)),'m--','LineWidth',3);
+semilogy(squeeze(median(error(10,:,:),2)),'k--','LineWidth',3);
+semilogy(squeeze(median(error(11,:,:),2)),'g--','LineWidth',3);
+    
+xlabel('[i]:iteration number')
+ylabel('||x-x_i||')
+axis([1 max_its + 10 10^-5 2*10^0])
+
+legend(strcat('0-IHT(0) [', num2str(mean(num_iter(1,:))),'][', num2str(mean(total_time(1,:))),']'), ...
+strcat('0-IHT(2) [', num2str(mean(num_iter(2,:))),'][', num2str(mean(total_time(2,:))),']'), ...
+strcat('0-IHT(4) [', num2str(mean(num_iter(3,:))),'][', num2str(mean(total_time(3,:))),']'), ...
+strcat('0-IHT(5) [', num2str(mean(num_iter(4,:))),'][', num2str(mean(total_time(4,:))),']'), ...
+strcat('1-IHT(2) [', num2str(mean(num_iter(5,:))),'][', num2str(mean(total_time(5,:))),']'), ...
+strcat('1-IHT(4) [', num2str(mean(num_iter(6,:))),'][', num2str(mean(total_time(6,:))),']'), ...
+strcat('1-IHT(5) [', num2str(mean(num_iter(7,:))),'][', num2str(mean(total_time(7,:))),']'), ...
+strcat('2-ALPS(2) [', num2str(mean(num_iter(8,:))),'][', num2str(mean(total_time(8,:))),']'), ...
+strcat('2-ALPS(4) [', num2str(mean(num_iter(9,:))),'][', num2str(mean(total_time(9,:))),']'), ...
+strcat('2-ALPS(5) [', num2str(mean(num_iter(10,:))),'][', num2str(mean(total_time(10,:))),']'), ...
+strcat('3-ALPS(2) [', num2str(mean(num_iter(11,:))),'][', num2str(mean(total_time(11,:))),']'));
+title( strcat('N= ', num2str(N), ', M=  ', num2str(M), ', K=  ', num2str(K)) );
+shg;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPSdemo.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,79 @@
+% =========================================================================
+%                   Algebraic Pursuit algorithms - Demo
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithms are accelerated hard thresholding methods 
+% for sparse recovery in linear inverse problems. In particular, let
+%             Phi : M x N real matrix (M < N)
+%             x*  : N x 1 K-sparse data vector
+%             n   : N x 1 additive noise vector
+% then, y = Phi x* + n is the undersampled M x 1 measurement vector. ALPS 
+% solves the following minimization problem 
+%          minimize ||y - Phi x||^2    subject to x being K-sparse. 
+% 
+% Detailed discussion on the algorithm can be found in 
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation", written
+% by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+
+clear all
+close all
+clc
+
+addpath ALPS/;
+
+% General parameters
+N = 1000;                       % Size of sparse vector x
+M = 300;                        % Number of measurements
+K = ceil(M*(0.25));              % Sparsity of  x*
+sigma = 0.000;                 % Additive noise standard deviation
+verbose = 1;                    % Print information
+
+% ALPS parameters
+its = 600;                      % Maximum number of iterations
+Ttol= 1e-6;                     % Convergence tolerance
+mode = 1;                       % Binary representation of (SolveNewtonb, GradientDescentx, SolveNewtonx)
+memory = 1;                     % Memory of IHT method
+tau = 0;                        % Momentum term
+useCG = 0;                      % Use Conjugate-Gradients solver (0 = direct solve)
+mu = 0;                         % Step size selection
+
+% Please refer to generate_matrix.m and generate_vector.m files
+m_ensemble = 'Gaussian';        % Measurement matrix ensemble type
+x_ensemble = 'Gaussian';        % Sparse vector ensemble type
+
+% reset(RandStream.getDefaultStream);
+
+%% Generate data
+x = generate_vector(N, K, x_ensemble);
+Phi = generate_matrix(M, N, m_ensemble);
+noise = sigma*randn(M,1);
+y = Phi*x + noise;
+
+%% ALPS reconstruction
+disp('=====================================================================');
+[x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, K, 'memory', memory, 'mode', mode, 'tolerance', Ttol, 'ALPSiterations', its, 'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+if (numiter ~= -1)
+    str = sprintf('Error norm: %f ', norm(x-x_hat));
+    disp(str);
+end;
+
+%% Plot error per iteration
+if (numiter ~= -1)
+    error = sqrt(sum(abs(x*ones(1,size(x_path,2))-x_path).^2));
+
+    set(0, 'DefaultAxesFontSize', 14);
+    figure;
+    semilogy(error,'LineWidth',3); hold on;
+    grid on;
+    semilogy((norm(noise))*ones(1,its),'m-','LineWidth',1);
+
+    xlabel('[i]:iteration number')
+    ylabel('||x-x_i||')
+    axis([1 numiter + 5 10^-5 2*10^0])
+
+    legend(strcat('Time = [',num2str(time),']'), 'Noise Level');
+    title(strcat('N = ', num2str(N), ', M = ', num2str(M), ', K = ', num2str(K)));
+    shg;
+end;
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/README	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,19 @@
+This archive includes:
+
+1. ALPS folder
+2. generate_matrix.m: generates measurement matrix.
+3. generate_vector.m: generates sparse vector.
+4. Report_bibtex.bib: BibTeX reference for citing.
+5. ALPSdemo.m: demo of ALPS.
+6. ALPSbenchmark_errornorm_vs_iter.m: generates a figure of the average error norm vs. the number of iterations.
+
+ALPS folder includes:
+
+1. AlgebraicPursuit.m: "Wrapper" code.
+2. zero_ALPS.m: Implementation of 0-memory acceleration.
+3. one_ALPS.m: Implementation of 1-memory acceleration.
+4. l_ALPS.m: Implementation of l-memory acceleration.
+5. infty_ALPS.m: Implementation of infty-memory acceleration.
+6. cgsolve.m: CG algorithm created by Justin Romberg, Caltech.
+
+Please run ALPSdemo from the MATLAB command line to verify the installation before use.
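+
+An illustrative session (sizes and option values are arbitrary; the name/value
+options mirror the call used in ALPSdemo.m):
+
+   addpath ALPS/;
+   Phi = generate_matrix(300, 1000, 'Gaussian');
+   x   = generate_vector(1000, 75, 'Gaussian');
+   y   = Phi*x;
+   [x_hat, numiter, time, x_path] = AlgebraicPursuit(y, Phi, 75, ...
+       'memory', 1, 'mode', 1, 'tolerance', 1e-6, 'ALPSiterations', 600, ...
+       'verbose', 1, 'tau', 0, 'useCG', 0, 'mu', 0);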
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/Report_bibtex.bib	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,27 @@
+@comment{ generated by <http://infoscience.epfl.ch/> }
+
+@TechReport{EPFL-REPORT-162761,
+   abstract    = {We propose and analyze acceleration schemes for hard
+                 thresholding methods with applications to sparse
+                 approximation in linear inverse systems. Our acceleration
+                 schemes fuse combinatorial, sparse projection algorithms
+                 with convex optimization algebra to provide
+                 computationally efficient and robust sparse recovery
+                 methods. We compare and contrast the (dis)advantages of
+                 the proposed schemes with the state-of-the-art, not only
+                 within hard thresholding methods, but also within convex
+                 sparse recovery algorithms.},
+   affiliation = {EPFL},
+   author      = {Cevher, Volkan},
+   details     = {http://infoscience.epfl.ch/record/162761},
+   documenturl = {http://infoscience.epfl.ch/record/162761/files/techreport.pdf},
+   keywords    = {structured sparsity; sparse recovery; hard thresholding},
+   oai-id      = {oai:infoscience.epfl.ch:162761},
+   oai-set     = {report; fulltext; fulltext-public},
+   status      = {PUBLISHED},
+   submitter   = {199128; 199128},
+   title       = {On {A}ccelerated {H}ard {T}hresholding {M}ethods for
+                 {S}parse {A}pproximation},
+   unit        = {LIONS},
+   year        = 2011
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/generate_matrix.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,59 @@
+function [A] = generate_matrix(M, N, ensemble, p)
+% =========================================================================
+%                       Measurement matrix generator
+% =========================================================================
+% INPUT ARGUMENTS:
+% M                         Number of measurements (number of rows).
+% N                         Size of sparse vector (number of columns).
+% ensemble                  Ensemble type of measurement matrix. Possible
+%                           values are:
+%                           -'Gaussian': creates an MxN measurement matrix with 
+%                           elements drawn from the normal distribution N(0,1).
+%                           -'Bernoulli': creates an MxN measurement matrix with 
+%                           elements drawn from the Bernoulli distribution (1/2,1/2).
+%                           -'pBernoulli': creates an MxN measurement matrix with 
+%                           elements drawn from the Bernoulli distribution (p,1-p).
+%                           -'sparseGaussian': creates an MxN sparse measurement 
+%                           matrix with elements drawn from the normal distribution N(0,1).
+% p                         Parameter of the Bernoulli distribution (used with
+%                           'pBernoulli'; default: 0.5).
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% A                     MxN measurement matrix with normalized columns.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
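+%
+% Example (illustrative; the sizes below are arbitrary):
+%   A = generate_matrix(200, 1000);                     % Gaussian ensemble (default)
+%   B = generate_matrix(200, 1000, 'pBernoulli', 0.3);  % Bernoulli ~ (0.3, 0.7)
+% =========================================================================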
+if nargin < 3
+    ensemble = 'Gaussian';
+end;
+
+if nargin < 4
+    p = 0.5;
+end;
+
+switch ensemble
+    case 'Gaussian'
+        A = randn(M,N);         % Standard normal distribution                
+        for i = 1:N             % Normalize columns
+            A(:,i) = A(:,i)/norm(A(:,i));
+        end;
+    case 'Bernoulli'
+        A = (-1).^round(rand(M,N));     % Bernoulli ~ (1/2, 1/2) distribution
+        for i = 1:N             % Normalize columns
+            A(:,i) = A(:,i)/norm(A(:,i));
+        end;
+    case 'pBernoulli'
+        A = (-1).^(rand(M,N) > p);   % Bernoulli ~ (p, 1-p) distribution     
+        for i = 1:N             % Normalize columns
+            A(:,i) = A(:,i)/norm(A(:,i));
+        end;
+    case 'sparseGaussian'
+        leftd = 8;
+        A = zeros(M,N);
+        for i = 1:N
+            ind = randperm(M);
+            A(ind(1:leftd),i)=1/leftd;
+        end
+        for i = 1:N             % Normalize columns
+            A(:,i) = A(:,i)/norm(A(:,i));
+        end;
+end;
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/generate_vector.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,52 @@
+function [x] = generate_vector(N, K, ensemble, sigma, p)
+% =========================================================================
+%                       Sparse vector generator
+% =========================================================================
+% INPUT ARGUMENTS:
+% N                         Size of sparse vector.
+% K                         Sparsity of vector.
+% ensemble                  Ensemble type of the sparse vector. Possible
+%                           values are:
+%                           -'Gaussian': K non-zero elements of the sparse vector
+%                           are drawn from the normal distribution N(0,1).
+%                           -'sGaussian': K non-zero elements of the sparse vector
+%                           are drawn from the normal distribution N(0,sigma^2).
+%                           -'Uniform': K non-zero elements of the sparse vector
+%                           are drawn from the uniform distribution on [0,1].
+%                           -'Bernoulli': K non-zero elements of the sparse vector
+%                           are drawn from the Bernoulli distribution (1/2,1/2).
+%                           -'pBernoulli': K non-zero elements of the sparse vector
+%                           are drawn from the Bernoulli distribution (p,1-p).
+% sigma                     Standard deviation of Gaussian distribution.
+% p                         Parameter of Bernoulli distribution.
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x                     N x 1 K-sparse vector, normalized to unit norm.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
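+%
+% Example (illustrative; the sizes below are arbitrary):
+%   x = generate_vector(1000, 50);                        % Gaussian ensemble (default)
+%   z = generate_vector(1000, 50, 'pBernoulli', 1, 0.3);  % Bernoulli ~ (0.3, 0.7)
+% =========================================================================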
+
+if nargin < 3
+    ensemble = 'Gaussian';
+end;
+
+if nargin < 4
+    sigma = 1;
+    p = 0.5; 
+end;
+
+x = zeros(N,1);
+rand_indices = randperm(N);
+
+switch ensemble
+    case 'Gaussian'
+        x(rand_indices(1:K)) = randn(K,1);         % Standard normal distribution ~ N(0,1)
+    case 'sGaussian'
+        x(rand_indices(1:K)) = sigma*randn(K,1);    % Normal distribution ~ N(0,sigma^2)
+    case 'Uniform'
+        x(rand_indices(1:K)) = rand(K,1);           % Uniform [0,1] distribution
+    case 'Bernoulli'
+        x(rand_indices(1:K)) = (-1).^round(rand(K,1));     % Bernoulli ~ (1/2, 1/2) distribution
+    case 'pBernoulli'
+        x(rand_indices(1:K)) = (-1).^(rand(K,1) > p);   % Bernoulli ~ (p, 1-p) distribution     
+end;
+
+x = x/norm(x);
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/wrapper_ALPS_toolbox.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,80 @@
+function [x , numiter, time, x_path] = wrapper_ALPS_toolbox(b, A, param)
+%% SMALL wrapper for ALPS toolbox
+%
+%   Function gets as input
+%       b - measurement vector 
+%       A - dictionary 
+%       param - structure containing additional parameters; the field
+%               param.sparsity (desired sparsity level K) is required
+%   Output:
+%       x - sparse solution
+%       numiter - number of iterations
+%       time - time needed to solve the problem
+%       x_path - matrix containing x after every iteration
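+%
+%   Example (illustrative sketch; the field values are arbitrary, and only
+%   param.sparsity is mandatory -- the remaining fields fall back to the
+%   defaults set below):
+%
+%       param.sparsity  = 50;
+%       param.memory    = 0;
+%       param.mode      = 0;
+%       param.tolerance = 1e-5;
+%       param.iternum   = 300;
+%       [x, numiter, time, x_path] = wrapper_ALPS_toolbox(b, A, param);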
+
+%   Centre for Digital Music, Queen Mary, University of London.
+%   This file copyright 2011 Ivan Damnjanovic.
+%
+%   This program is free software; you can redistribute it and/or
+%   modify it under the terms of the GNU General Public License as
+%   published by the Free Software Foundation; either version 2 of the
+%   License, or (at your option) any later version.  See the file
+%   COPYING included with this distribution for more information.
+%   
+%%
+
+if isfield(param, 'sparsity')
+   sparsity = param.sparsity;
+else
+   printf('\nAlgebraic Pursuit algorithms require a desired sparsity level.\n("sparsity" field in solver parameters structure)\n ');
+   return
+end
+
+if isfield(param, 'memory')
+    memory = param.memory;
+else
+    memory = 0;
+end
+
+if isfield(param, 'mode')
+    mode = param.mode;
+else
+    mode = 0;
+end
+if isfield(param, 'tolerance')
+    tolerance = param.tolerance;
+else
+    tolerance = 1e-5;
+end
+if isfield(param, 'iternum')
+    iternum = param.iternum;
+else
+    iternum = 300;
+end
+if isfield(param, 'verbose')
+    verbose = param.verbose;
+else
+    verbose = 0;
+end
+if isfield(param, 'tau')
+    tau = param.tau;
+else
+    tau = 1/2;
+end
+if isfield(param, 'useCG')
+    useCG = param.useCG;
+else
+    useCG = 0;
+end
+if isfield(param, 'mu')
+    mu = param.mu;
+else
+    mu = 0;
+end
+training_size = size(b,2);
+x=zeros(size(A,2),training_size);
+for i = 1:training_size
+    [x(:,i), numiter, time, x_path] = AlgebraicPursuit(b(:,i), A, sparsity, 'memory', memory,...
+        'mode', mode, 'tolerance', tolerance, 'ALPSiterations', iternum, ...
+        'verbose', verbose, 'tau', tau, 'useCG', useCG, 'mu', mu);
+end
\ No newline at end of file
--- a/util/SMALL_plot.m	Fri Jul 29 12:35:52 2011 +0100
+++ b/util/SMALL_plot.m	Fri Aug 12 11:17:47 2011 +0100
@@ -21,7 +21,7 @@
   for i =1:m
   
   subplot(m,n, (i-1)*n+1); plot(1:length(SMALL.solver(i).solution), SMALL.solver(i).solution, 'b')
-        title(sprintf('%s(%s) in %.2f s', SMALL.solver(i).name, SMALL.solver(i).param,SMALL.solver(i).time),'Interpreter','none')
+        title(sprintf('%s in %.2f s', SMALL.solver(i).name, SMALL.solver(i).time),'Interpreter','none')
           xlabel('Coefficient') 
      
   % Plot reconstructed signal against original
@@ -31,7 +31,7 @@
       
       subplot(m,n,(i-1)*n+j); plot(1:length(SMALL.solver(i).reconstructed(:,j-1)), SMALL.solver(i).reconstructed(:,j-1)  ,'b.-',  1:length(SMALL.Problem.signal(:,j-1)), SMALL.Problem.signal(:,j-1),'r--')
           %legend(SMALL.solver(i).name,'Original signal',0,'Location','SouthOutside','Interpreter','none');
-               title(sprintf('%s(%s) in %.2f s signal %d', SMALL.solver(i).name, SMALL.solver(i).param,SMALL.solver(i).time,n-1),'Interpreter','none')
+               title(sprintf('%s in %.2f s signal %d', SMALL.solver(i).name, SMALL.solver(i).time,n-1),'Interpreter','none')
   end
   end
 end
--- a/util/SMALL_solve.m	Fri Jul 29 12:35:52 2011 +0100
+++ b/util/SMALL_solve.m	Fri Aug 12 11:17:47 2011 +0100
@@ -48,7 +48,11 @@
 if strcmpi(solver.toolbox,'sparselab')
     y = eval([solver.name,'(SparseLab_A, b, n,',solver.param,');']);
 elseif strcmpi(solver.toolbox,'sparsify')
-    y = eval([solver.name,'(b, A, n, ''P_trans'', AT,',solver.param,');']);
+    if isa(Problem.A,'float')
+        y = eval([solver.name,'(b, A, n,',solver.param,');']);
+    else
+        y = eval([solver.name,'(b, A, n, ''P_trans'', AT,',solver.param,');']);
+    end
 elseif (strcmpi(solver.toolbox,'spgl1')||strcmpi(solver.toolbox,'gpsr'))
     y = eval([solver.name,'(b, A,',solver.param,');']);
 elseif (strcmpi(solver.toolbox,'SPAMS'))
@@ -66,27 +70,35 @@
     y = eval([solver.name,'(A, b, G,epsilon,''maxatoms'',maxatoms,''checkdict'',''off'');']);
 elseif (strcmpi(solver.toolbox, 'ompsbox'))
     basedict = Problem.basedict;
-     if issparse(Problem.A)
+    if issparse(Problem.A)
         A = Problem.A;
-      else
+    else
         A = sparse(Problem.A);
-      end
+    end
     G = dicttsep(basedict,A,dictsep(basedict,A,speye(size(A,2))));
     epsilon=solver.param.epsilon;
     maxatoms=solver.param.maxatoms;
     y = eval([solver.name,'(basedict, A, b, G,epsilon,''maxatoms'',maxatoms,''checkdict'',''off'');']);
     Problem.sparse=1;
-%   To introduce new sparse representation algorithm put the files in
-%   your Matlab path. Next, unique name <TolboxID> for your toolbox and
-%   prefferd API <Preffered_API> needs to be defined.
-%
-% elseif strcmpi(solver.toolbox,'<ToolboxID>')
-%
-%     % - Evaluate the function (solver.name - defined in the main) with
-%     %   parameters given above
-%
-%     y = eval([solver.name,'(<Preffered_API>);']);
-
+elseif (strcmpi(solver.toolbox, 'ALPS'))
+    if ~isa(Problem.A,'float')
+        % ALPS does not accept implicit dictionary definition
+        A = opToMatrix(Problem.A, 1);
+    end
+    [y, numiter, time, y_path] = wrapper_ALPS_toolbox(b, A, solver.param);
+    
+    
+    %   To introduce a new sparse representation algorithm, put the files in
+    %   your Matlab path. Next, a unique name <ToolboxID> for your toolbox and
+    %   preferred API <Preferred_API> need to be defined.
+    %
+    % elseif strcmpi(solver.toolbox,'<ToolboxID>')
+    %
+    %     % - Evaluate the function (solver.name - defined in the main) with
+    %     %   parameters given above
+    %
+    %     y = eval([solver.name,'(<Preferred_API>);']);
+    
 else
     printf('\nToolbox has not been registered. Please change SMALL_solver file.\n');
     return