diff toolboxes/alps/ALPS/one_ALPS.m @ 154:0de08f68256b ivand_dev
ALPS toolbox - Algebraic Pursuit added to smallbox
author   | Ivan Damnjanovic lnx <ivan.damnjanovic@eecs.qmul.ac.uk>
date     | Fri, 12 Aug 2011 11:17:47 +0100
parents  |
children |
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/alps/ALPS/one_ALPS.m	Fri Aug 12 11:17:47 2011 +0100
@@ -0,0 +1,252 @@
+function [x_hat, numiter, x_path] = one_ALPS(y, Phi, K, params)
+% =========================================================================
+%                  1-ALPS(#) algorithm - Beta Version
+% =========================================================================
+% Algebraic Pursuit (ALPS) algorithm with 1-memory acceleration.
+%
+% A detailed discussion of the algorithm can be found in
+% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation",
+%     written by Volkan Cevher, Technical Report, 2011.
+% =========================================================================
+% INPUT ARGUMENTS:
+% y                     M x 1 undersampled measurement vector.
+% Phi                   M x N regression matrix.
+% K                     Sparsity of the underlying vector x* or desired
+%                       sparsity of the solution.
+% params                Structure of parameters. These are:
+%
+%   tol,...             Early stopping tolerance. Default value:
+%                       tol = 1e-5.
+%   ALPSiters,...       Maximum number of algorithm iterations. Default
+%                       value: 300.
+%   solveNewtonb,...    If solveNewtonb == 1: corresponds to solving a
+%                       Newton system restricted to a sparse support.
+%                       It is implemented via conjugate gradients.
+%                       If solveNewtonb == 0: step size selection as
+%                       described in eqs. (12) and (13) in [1].
+%                       Default value: solveNewtonb = 0.
+%   gradientDescentx,... If gradientDescentx == 1: single gradient update
+%                       of x_{i+1} restricted to its support, with line
+%                       search. Default value: gradientDescentx = 1.
+%   solveNewtonx,...    If solveNewtonx == 1: akin to Hard Thresholding
+%                       Pursuit (cf. Simon Foucart, "Hard Thresholding
+%                       Pursuit," preprint, 2010). Default value:
+%                       solveNewtonx = 0.
+%   useCG,...           If useCG == 1: the restricted Newton systems above
+%                       are solved with cgsolve; otherwise with backslash.
+%   tau,...             Variable that controls the momentum in the
+%                       non-memoryless case. Ignored in the memoryless
+%                       case. Default value: tau = 1/2.
+%                       Special cases:
+%                       - tau = 0: the momentum step size is driven by
+%                         the following formulas:
+%                             a_1 = 1;
+%                             a_{i+1} = (1 + sqrt(1 + 4*a_i^2))/2;
+%                             tau = (a_i - 1)/a_{i+1};
+%                         described in [2] "A fast iterative
+%                         shrinkage-thresholding algorithm for linear
+%                         inverse problems", Beck A., and Teboulle M.
+%                       - tau = -1: the momentum step size is
+%                         automatically optimized in every step.
+%                       - tau as a function handle: user-defined behavior
+%                         of the tau momentum term.
+%   mu,...              Variable that controls the step size selection.
+%                       When mu = 0, the step size is computed adaptively
+%                       per iteration. Default value: mu = 0.
+%   cg_maxiter,...      Maximum number of iterations for the Conjugate
+%                       Gradients method.
+%   cg_tol              Tolerance variable for the Conjugate Gradients
+%                       method.
+% =========================================================================
+% OUTPUT ARGUMENTS:
+% x_hat                 N x 1 recovered K-sparse vector.
+% numiter               Number of iterations executed.
+% x_path                Keeps the series of computed N x 1 K-sparse
+%                       vectors until the end of the iterative process.
+% =========================================================================
+% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
+% =========================================================================
+% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
+% Email: jrom@acm.caltech.edu
+% =========================================================================
+% This work was supported in part by the European Commission under Grant
+% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like
+% to acknowledge Rice University for his Faculty Fellowship.
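+% =========================================================================
+% USAGE EXAMPLE (illustrative sketch only; this block and the parameter
+% values in it are assumptions added for documentation, not part of the
+% original routine):
+%
+%   M = 128; N = 512; K = 20;
+%   Phi = randn(M, N)/sqrt(M);                  % random regression matrix
+%   perm = randperm(N); supp = perm(1:K);
+%   x_star = zeros(N, 1);
+%   x_star(supp) = randn(K, 1);                 % K-sparse ground truth
+%   y = Phi*x_star;                             % noiseless measurements
+%   params = struct('tol', 1e-5, 'ALPSiters', 300, 'solveNewtonb', 0, ...
+%                   'gradientDescentx', 1, 'solveNewtonx', 0, ...
+%                   'tau', 0, 'mu', 0, 'useCG', 0, ...
+%                   'cg_maxiter', 50, 'cg_tol', 1e-8);
+%   [x_hat, numiter, x_path] = one_ALPS(y, Phi, K, params);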
+% =========================================================================
+
+[M, N] = size(Phi);
+
+%% Initialize transpose of measurement matrix
+
+Phi_t = Phi';
+
+%% Initialize to zero vector
+x_cur = zeros(N,1);
+y_cur = zeros(N,1);
+Phi_x_cur = zeros(M,1);
+Y_i = [];
+
+x_path = zeros(N, params.ALPSiters);
+
+%% CG params
+if (params.solveNewtonx == 1 || params.solveNewtonb == 1)
+    cg_verbose = 0;
+    cg_A = Phi_t*Phi;
+    cg_b = Phi_t*y;
+end;
+
+%% Determine momentum step size selection strategy
+fista = 0;
+optimizeTau = 0;
+a_prev = 1;
+function_tau = 0;
+
+if (isa(params.tau, 'float'))
+    function_tau = 0;
+    if (params.tau == 0)
+        fista = 1;
+        optimizeTau = 0;
+    elseif (params.tau == -1)
+        optimizeTau = 1;
+        fista = 0;
+    end;
+elseif (isa(params.tau, 'function_handle'))
+    function_tau = 1;
+end;
+
+%% Determine step size selection strategy
+function_mu = 0;
+adaptive_mu = 0;
+
+if (isa(params.mu, 'float'))
+    function_mu = 0;
+    if (params.mu == 0)
+        adaptive_mu = 1;
+    else
+        adaptive_mu = 0;
+    end;
+elseif (isa(params.mu, 'function_handle'))
+    function_mu = 1;
+end;
+
+%% Help variables
+complementary_Yi = ones(N,1);
+
+i = 1;
+%% 1-ALPS(#)
+while (i <= params.ALPSiters)
+    x_path(:,i) = x_cur;
+    x_prev = x_cur;
+
+    % Compute the residual
+    if (i == 1)
+        res = y;
+        % Compute the derivative
+        der = Phi_t*res;
+    else
+        % Compute the derivative
+        if (optimizeTau)
+            res = y - Phi_x_cur - params.tau*Phi_diff;
+        else
+            res = y - Phi(:,Y_i)*y_cur(Y_i);
+        end;
+        der = Phi_t*res;
+    end;
+
+    Phi_x_prev = Phi_x_cur;
+
+    % Determine S_i set via eq. (11) (change of variable from x_i to y_i)
+    complementary_Yi(Y_i) = 0;
+    [tmpArg, ind_der] = sort(abs(der).*complementary_Yi, 'descend');
+    complementary_Yi(Y_i) = 1;
+    S_i = [Y_i; ind_der(1:K)];
+
+    ider = der(S_i);
+    if (params.solveNewtonb == 1)
+        % Compute least squares solution of the system A*y = (A*A)x using CG
+        if (params.useCG == 1)
+            [b, tmpArg, tmpArg] = cgsolve(cg_A(S_i, S_i), cg_b(S_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            b = cg_A(S_i, S_i)\cg_b(S_i);
+        end;
+    else
+        % Step size selection via eq. (12) and eq. (13) (change of variable from x_i to y_i)
+        if (adaptive_mu)
+            Pder = Phi(:,S_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            b = y_cur(S_i) + mu_bar*ider;
+        elseif (function_mu)
+            b = y_cur(S_i) + params.mu(i)*ider;
+        else
+            b = y_cur(S_i) + params.mu*ider;
+        end;
+    end;
+
+    % Hard-threshold b and compute X_{i+1}
+    [tmpArg, ind_b] = sort(abs(b), 'descend');
+    x_cur = zeros(N,1);
+    x_cur(S_i(ind_b(1:K))) = b(ind_b(1:K));
+    X_i = S_i(ind_b(1:K));
+
+    if (params.gradientDescentx == 1)
+        % Calculate gradient of estimated vector x_cur
+        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+        res = y - Phi_x_cur;
+        der = Phi_t*res;
+        ider = der(X_i);
+
+        if (adaptive_mu)
+            Pder = Phi(:,X_i)*ider;
+            mu_bar = ider'*ider/(Pder'*Pder);
+            x_cur(X_i) = x_cur(X_i) + mu_bar*ider;
+        elseif (function_mu)
+            x_cur(X_i) = x_cur(X_i) + params.mu(i)*ider;
+        else
+            x_cur(X_i) = x_cur(X_i) + params.mu*ider;
+        end;
+    elseif (params.solveNewtonx == 1)
+        % Similar to HTP
+        if (params.useCG == 1)
+            [v, tmpArg, tmpArg] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
+        else
+            v = cg_A(X_i, X_i)\cg_b(X_i);
+        end;
+        x_cur(X_i) = v;
+    end;
+
+    if (~function_tau)    % If tau is not a function handle...
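+        % --- Illustrative note added for clarity; not in the original file ---
+        % With a scalar tau, the momentum (expansion) step computed below is
+        %     y_{i+1} = x_{i+1} + tau*(x_{i+1} - x_i),
+        % where tau is the fixed user-supplied value, the FISTA schedule
+        % (params.tau == 0), or the per-iteration optimal value
+        % (params.tau == -1), as selected in the branches that follow.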
+        if (fista)                       % FISTA configuration
+            a_cur = (1 + sqrt(1 + 4*a_prev^2))/2;
+            params.tau = (a_prev - 1)/a_cur;
+            a_prev = a_cur;
+        elseif (optimizeTau)             % Compute optimized tau
+
+            % tau = argmin ||y - Phi*y_{i+1}||
+            %     = <res, Phi*(x_cur - x_prev)>/||Phi*(x_cur - x_prev)||^2
+
+            Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
+            res = y - Phi_x_cur;
+            if (i == 1)
+                Phi_diff = Phi_x_cur;
+            else
+                Phi_diff = Phi_x_cur - Phi_x_prev;
+            end;
+            params.tau = res'*Phi_diff/(Phi_diff'*Phi_diff);
+        end;
+
+        y_cur = x_cur + params.tau*(x_cur - x_prev);
+        Y_i = find(ne(y_cur, 0));
+    else
+        y_cur = x_cur + params.tau(i)*(x_cur - x_prev);
+        Y_i = find(ne(y_cur, 0));
+    end;
+
+    % Test stopping criterion
+    if (i > 1) && (norm(x_cur - x_prev) < params.tol*norm(x_cur))
+        break;
+    end;
+    i = i + 1;
+end;
+
+x_hat = x_cur;
+numiter = i;
+
+if (i > params.ALPSiters)
+    x_path = x_path(:, 1:numiter-1);
+else
+    x_path = x_path(:, 1:numiter);
+end;
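The returned x_path makes it straightforward to monitor convergence. The snippet below is an illustrative sketch only (it is not part of the committed file) and assumes the y, Phi, K, and params variables from the usage example in the file header above:

    [x_hat, numiter, x_path] = one_ALPS(y, Phi, K, params);
    res_norm = zeros(1, size(x_path, 2));
    for k = 1:size(x_path, 2)
        res_norm(k) = norm(y - Phi*x_path(:,k))/norm(y);   % relative residual of the k-th iterate
    end
    semilogy(res_norm);
    xlabel('iteration'); ylabel('||y - Phi x_i|| / ||y||');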