toolboxes/alps/ALPS/zero_ALPS.m @ 154:0de08f68256b (ivand_dev)

ALPS toolbox - Algebraic Pursuit added to smallbox
author  Ivan Damnjanovic lnx <ivan.damnjanovic@eecs.qmul.ac.uk>
date    Fri, 12 Aug 2011 11:17:47 +0100
function [x_hat, numiter, x_path] = zero_ALPS(y, Phi, K, params)
% =========================================================================
% 0-ALPS(#) algorithm - Beta Version
% =========================================================================
% Algebraic Pursuit (ALPS) algorithm with memoryless acceleration.
%
% A detailed discussion of the algorithm can be found in
% [1] "On Accelerated Hard Thresholding Methods for Sparse Approximation"
%     by Volkan Cevher, Technical Report, 2011.
% =========================================================================
% INPUT ARGUMENTS:
% y                     M x 1 undersampled measurement vector.
% Phi                   M x N regression matrix.
% K                     Sparsity of underlying vector x* or desired
%                       sparsity of solution.
% params                Structure of parameters. These are:
%
%    tol,...            Early stopping tolerance. Default value:
%                       tol = 1e-5.
%    ALPSiters,...      Maximum number of algorithm iterations. Default
%                       value: 300.
%    mode,...           According to [1], possible values are
%                       [0,1,2,4,5,6]. This value comes from the binary
%                       representation of the parameters
%                       (solveNewtonb, gradientDescentx, solveNewtonx),
%                       which are explained next. Default value: 0.
%    solveNewtonb,...   If solveNewtonb == 1: Corresponds to solving a
%                       Newton system restricted to a sparse support.
%                       It is implemented via conjugate gradients.
%                       If solveNewtonb == 0: Step size selection as
%                       described in eqs. (12) and (13) in [1].
%                       Default value: solveNewtonb = 0.
%    gradientDescentx,... If gradientDescentx == 1: single gradient
%                       update of x_{i+1} restricted to its support with
%                       line search. Default value: gradientDescentx = 1.
%    solveNewtonx,...   If solveNewtonx == 1: Akin to Hard Thresholding
%                       Pursuit (cf. Simon Foucart, "Hard Thresholding
%                       Pursuit," preprint, 2010).
%                       Default value: solveNewtonx = 0.
%    mu,...             Variable that controls the step size selection.
%                       When mu = 0, the step size is computed adaptively
%                       per iteration. Default value: mu = 0.
%    useCG,...          If useCG == 1: restricted Newton systems are solved
%                       with the Conjugate-Gradients routine (cgsolve);
%                       otherwise MATLAB's backslash operator is used.
%    cg_maxiter,...     Maximum iterations for the Conjugate-Gradients method.
%    cg_tol             Tolerance variable for the Conjugate-Gradients method.
% =========================================================================
% OUTPUT ARGUMENTS:
% x_hat                 N x 1 recovered K-sparse vector.
% numiter               Number of iterations executed.
% x_path                Keeps a series of computed N x 1 K-sparse vectors
%                       until the end of the iterative process.
% =========================================================================
% 01/04/2011, by Anastasios Kyrillidis. anastasios.kyrillidis@epfl.ch, EPFL.
% =========================================================================
% cgsolve.m is written by Justin Romberg, Caltech, Oct. 2005.
% Email: jrom@acm.caltech.edu
% =========================================================================
% This work was supported in part by the European Commission under Grant
% MIRG-268398 and DARPA KeCoM program #11-DARPA-1055. VC also would like
% to acknowledge Rice University for his Faculty Fellowship.
% =========================================================================
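% Example (illustrative only, not part of the original file; the problem
% sizes and parameter values below are arbitrary placeholders):
%
%   N = 1000; M = 300; K = 20;
%   Phi = randn(M, N)/sqrt(M);                % random measurement matrix
%   x_star = zeros(N, 1);
%   supp = randperm(N);
%   x_star(supp(1:K)) = randn(K, 1);          % K-sparse ground truth
%   y = Phi*x_star;                           % noiseless measurements
%
%   params.tol = 1e-5;        params.ALPSiters = 300;
%   params.mode = 0;          params.mu = 0;
%   params.solveNewtonb = 0;  params.gradientDescentx = 1;
%   params.solveNewtonx = 0;  params.useCG = 0;
%   params.cg_maxiter = 50;   params.cg_tol = 1e-8;
%
%   [x_hat, numiter, x_path] = zero_ALPS(y, Phi, K, params);
% =========================================================================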

[tmpArg, N] = size(Phi);

%% Initialize transpose of measurement matrix

Phi_t = Phi';

%% Initialize to zero vector
x_cur = zeros(N,1);
X_i = [];

x_path = zeros(N, params.ALPSiters);

%% CG params
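% The Gram matrix Phi'*Phi and the vector Phi'*y are precomputed only for
% the modes checked below, where they are reused by the restricted Newton
% solves (via cgsolve or backslash); presumably these are the modes in
% which solveNewtonb or solveNewtonx is active.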
if (params.mode == 1 || params.mode == 4 || params.mode == 5 || params.mode == 6)
    cg_verbose = 0;
    cg_A = Phi_t*Phi;
    cg_b = Phi_t*y;
end;

%% Determine step size selection strategy
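% Three cases, depending on params.mu:
%   mu == 0 (scalar)     -> adaptive_mu = 1: step size computed per
%                           iteration via eqs. (12)-(13) in [1];
%   mu ~= 0 (scalar)     -> constant step size params.mu;
%   mu function handle   -> iteration-dependent step size params.mu(i).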
function_mu = 0;
adaptive_mu = 0;

if (isa(params.mu,'float'))
    function_mu = 0;
    if (params.mu == 0)
        adaptive_mu = 1;
    else
        adaptive_mu = 0;
    end;
elseif (isa(params.mu,'function_handle'))
    function_mu = 1;
end;

%% Help variables
complementary_Xi = ones(N,1);

i = 1;
%% 0-ALPS(#)
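% Each iteration: form the residual, compute the derivative Phi'*res, extend
% the active set with the K largest derivative entries outside the current
% support, take a step (or Newton solve) on that set, hard-threshold back to
% K entries, and optionally refine the new K-sparse estimate by a gradient
% or Newton step on its support.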
while (i <= params.ALPSiters)
    x_path(:,i) = x_cur;
    x_prev = x_cur;

    % Compute the residual
    if (i == 1)
        res = y;
    else
        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
        res = y - Phi_x_cur;
    end;

    % Compute the derivative
    der = Phi_t*res;

    % Determine S_i set via eq. (11)
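    % (S_i is the union of the current support X_i with the indices of the
    % K largest-magnitude entries of the derivative outside X_i; entries
    % already in X_i are masked out before sorting.)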
    complementary_Xi(X_i) = 0;
    [tmpArg, ind_der] = sort(abs(der).*complementary_Xi, 'descend');
    complementary_Xi(X_i) = 1;
    S_i = [X_i; ind_der(1:K)];

    ider = der(S_i);
    if (params.solveNewtonb == 1)
        % Compute the least squares solution of the restricted system
        % (Phi'*Phi)(S_i,S_i)*b = (Phi'*y)(S_i)
        if (params.useCG == 1)
            [b, tmpArg, tmpArg] = cgsolve(cg_A(S_i, S_i), cg_b(S_i), params.cg_tol, params.cg_maxiter, cg_verbose);
        else
            b = cg_A(S_i,S_i)\cg_b(S_i);
        end;
    else
        % Step size selection via eq. (12) and eq. (13)
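        % Adaptive case: mu_bar = ||der(S_i)||^2 / ||Phi(:,S_i)*der(S_i)||^2,
        % the exact line-search minimizer along the derivative direction
        % restricted to S_i.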
        if (adaptive_mu)
            Pder = Phi(:,S_i)*ider;
            mu_bar = ider'*ider/(Pder'*Pder);
            b = x_cur(S_i) + (mu_bar)*ider;
        elseif (function_mu)
            b = x_cur(S_i) + params.mu(i)*ider;
        else
            b = x_cur(S_i) + params.mu*ider;
        end;
    end;

    % Hard-threshold b and compute X_{i+1}
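    % (Keep the K largest-magnitude entries of b and record their locations
    % within S_i as the new support X_{i+1}.)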
    [tmpArg, ind_b] = sort(abs(b), 'descend');
    x_cur = zeros(N,1);
    x_cur(S_i(ind_b(1:K))) = b(ind_b(1:K));
    X_i = S_i(ind_b(1:K));

    if (params.gradientDescentx == 1)
        % Calculate gradient of estimated vector x_cur
        Phi_x_cur = Phi(:,X_i)*x_cur(X_i);
        res = y - Phi_x_cur;
        der = Phi_t*res;
        ider = der(X_i);

        if (adaptive_mu)
            Pder = Phi(:,X_i)*ider;
            mu_bar = ider'*ider/(Pder'*Pder);
            x_cur(X_i) = x_cur(X_i) + mu_bar*ider;
        elseif (function_mu)
            x_cur(X_i) = x_cur(X_i) + params.mu(i)*ider;
        else
            x_cur(X_i) = x_cur(X_i) + params.mu*ider;
        end;
    elseif (params.solveNewtonx == 1)
        % Similar to HTP
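        % Debiasing step: re-solve the least-squares problem restricted to
        % the new support, (Phi'*Phi)(X_i,X_i)*v = (Phi'*y)(X_i), as in Hard
        % Thresholding Pursuit.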
        if (params.useCG == 1)
            [v, tmpArg, tmpArg] = cgsolve(cg_A(X_i, X_i), cg_b(X_i), params.cg_tol, params.cg_maxiter, cg_verbose);
        else
            v = cg_A(X_i,X_i)\cg_b(X_i);
        end;
        x_cur(X_i) = v;
    end;

    % Test stopping criterion
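    % (Relative-change criterion: stop once ||x_cur - x_prev|| drops below
    % params.tol * ||x_cur||.)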
    if (i > 1) && (norm(x_cur - x_prev) < params.tol*norm(x_cur))
        break;
    end;
    i = i + 1;

end;

x_hat = x_cur;
numiter = i;

if (i > params.ALPSiters)
    x_path = x_path(:,1:numiter-1);
else
    x_path = x_path(:,1:numiter);
end;