pyCSalgos/GAP/GAP.py @ 21:2805288d6656
2011 11 08 Worked from school. Commit 2

author:   nikcleju
date:     Tue, 08 Nov 2011 14:45:35 +0000
children: dd0e78b5bb13
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 13 14:05:22 2011

@author: ncleju
"""

#from numpy import *
#from scipy import *
import numpy as np
import scipy as sp
import scipy.stats      #from scipy import stats
import scipy.linalg     #from scipy import linalg
import math

from numpy.random import RandomState
rng = RandomState()
def Generate_Analysis_Operator(d, p):
    # generate random tight frame with equal (unit) row norms
    if p == d:
        T = rng.randn(d, d)
        Omega, discard = np.linalg.qr(T)
    else:
        Omega = rng.randn(p, d)
        T = np.zeros((p, d))
        tol = 1e-8
        max_j = 200
        j = 1
        while np.sum(np.abs(T - Omega)) > tol*p*d and j < max_j:
            j = j + 1
            T = Omega
            U, S, Vh = sp.linalg.svd(Omega)
            V = Vh.T
            #Omega = U * [eye(d); zeros(p-d,d)] * V';
            Omega2 = np.dot(np.dot(U, np.concatenate((np.eye(d), np.zeros((p-d, d))))), V.transpose())
            #Omega = diag(1./sqrt(diag(Omega*Omega')))*Omega;
            Omega = np.dot(np.diag(1.0 / np.sqrt(np.diag(np.dot(Omega2, Omega2.transpose())))), Omega2)
        #end
        ##disp(j);
    #end
    return Omega

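# Illustrative usage sketch (added for exposition; not part of the original
# changeset). After the normalization step above, the rows of Omega should
# have unit norm, and Omega should be approximately a tight frame, i.e.
# Omega'*Omega ~ (p/d)*eye(d) if the alternating iteration converged.
def _demo_generate_analysis_operator():
    d, p = 20, 30
    Omega = Generate_Analysis_Operator(d, p)
    print(np.allclose(np.sum(Omega**2, 1), 1.0))    # unit-norm rows
    # deviation from tightness; should be small if the iteration converged
    print(np.linalg.norm(np.dot(Omega.T, Omega) - (float(p)/d)*np.eye(d)))
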
def Generate_Data_Known_Omega(Omega, d, p, m, k, noiselevel, numvectors, normstr):
    #function [x0,y,M,LambdaMat] = Generate_Data_Known_Omega(Omega, d,p,m,k,noiselevel, numvectors, normstr)

    # Building an analysis problem, which includes the ingredients:
    #   - Omega - the analysis operator of size p*d
    #   - M is an underdetermined measurement matrix of size m*d (m<d)
    #   - x0 is a vector of length d that satisfies ||Omega*x0||_0 = p-k
    #   - Lambda is the true location of these k zeros in Omega*x0
    #   - a measurement vector y0=Mx0 is computed
    #   - noise contaminated measurement vector y is obtained by
    #     y = y0 + n where n is an additive gaussian noise with norm(n,2)/norm(y0,2) = noiselevel
    # Added by Nic:
    #   - Omega = analysis operator
    #   - normstr: if 'l0', generate l0 sparse vector (unchanged). If 'l1',
    #     generate a vector of Laplacian random variables (gamma) and
    #     pseudoinvert to find x

    # Omega is known as input parameter
    #Omega=Generate_Analysis_Operator(d, p);
    # Omega = randn(p,d);
    # for i = 1:size(Omega,1)
    #   Omega(i,:) = Omega(i,:) / norm(Omega(i,:));
    # end

    # Init
    LambdaMat = np.zeros((k, numvectors))
    x0 = np.zeros((d, numvectors))
    y = np.zeros((m, numvectors))
    realnoise = np.zeros((m, numvectors))

    M = rng.randn(m, d)

    #for i=1:numvectors
    for i in range(0, numvectors):

        # Generate signals
        #if strcmp(normstr,'l0')
        if normstr == 'l0':
            # Unchanged

            #Lambda=randperm(p);
            Lambda = rng.permutation(int(p))
            Lambda = np.sort(Lambda[0:k])
            LambdaMat[:, i] = Lambda    # store for output

            # The signal is drawn at random from the null-space defined by the rows
            # of the matrix Omega(Lambda,:)
            U, D, Vh = sp.linalg.svd(Omega[Lambda, :])
            V = Vh.T
            NullSpace = V[:, k:]
            #print np.dot(NullSpace, rng.randn(d-k,1)).shape
            #print x0[:,i].shape
            x0[:, i] = np.squeeze(np.dot(NullSpace, rng.randn(d-k, 1)))
            # Nic: add orthogonality noise
            #  orthonoiseSNRdb = 6;
            #  n = randn(p,1);
            #  #x0(:,i) = x0(:,i) + n / norm(n)^2 * norm(x0(:,i))^2 / 10^(orthonoiseSNRdb/10);
            #  n = n / norm(n)^2 * norm(Omega * x0(:,i))^2 / 10^(orthonoiseSNRdb/10);
            #  x0(:,i) = pinv(Omega) * (Omega * x0(:,i) + n);

        #elseif strcmp(normstr, 'l1')
        elif normstr == 'l1':
            print('Nic says: not implemented yet')
            raise Exception('Nic says: not implemented yet')
            #gamma = laprnd(p,1,0,1);
            #x0(:,i) = Omega \ gamma;
        else:
            #error('normstr must be l0 or l1!');
            print('Nic says: normstr must be l0 or l1!')
            raise Exception('Nic says: normstr must be l0 or l1!')
        #end

        # Acquire measurements
        y[:, i] = np.dot(M, x0[:, i])

        # Add noise
        t_norm = np.linalg.norm(y[:, i], 2)
        n = np.squeeze(rng.randn(m, 1))
        y[:, i] = y[:, i] + noiselevel * t_norm * n / np.linalg.norm(n, 2)
        realnoise[:, i] = noiselevel * t_norm * n / np.linalg.norm(n, 2)
    #end

    return x0, y, M, LambdaMat, realnoise

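# Illustrative usage sketch (added; dimensions are arbitrary example values):
# build a small cosparse test problem and verify that each signal is
# annihilated by the rows of Omega indexed by its cosupport.
def _demo_generate_data():
    d, p, m, k = 50, 60, 30, 45
    Omega = Generate_Analysis_Operator(d, p)
    x0, y, M, LambdaMat, realnoise = Generate_Data_Known_Omega(Omega, d, p, m, k, 0.05, 10, 'l0')
    Lambda0 = LambdaMat[:, 0].astype(int)
    print(np.max(np.abs(np.dot(Omega[Lambda0, :], x0[:, 0]))))    # numerically zero
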
#####################

#function [xhat, arepr, lagmult] = ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, ilagmult, params)
def ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, ilagmult, params):

    #
    # This function aims to compute
    #    xhat = argmin || Omega(Lambdahat, :) * x ||_2   subject to   || y - M*x ||_2 <= epsilon.
    # arepr is the analysis representation corresponding to Lambdahat, i.e.,
    #    arepr = Omega(Lambdahat, :) * xhat.
    # The function also returns the lagrange multiplier used in the process of computing xhat.
    #
    # Inputs:
    #    y : observation/measurements of an unknown vector x0. It is equal to M*x0 + noise.
    #    M : measurement matrix
    #    MH : M', the conjugate transpose of M
    #    Omega : analysis operator
    #    OmegaH : Omega', the conjugate transpose of Omega. Also, synthesis operator.
    #    Lambdahat : an index set indicating some rows of Omega.
    #    xinit : initial estimate that will be used for the conjugate gradient algorithm.
    #    ilagmult : initial lagrange multiplier to be used in the computation.
    #    params : parameters
    #        params.noise_level : this corresponds to epsilon above.
    #        params.max_inner_iteration : 'maximum' number of iterations in conjugate gradient method.
    #        params.l2_accuracy : the l2 accuracy parameter used in conjugate gradient method
    #        params.l2solver : if the value is 'pseudoinverse', then direct matrix computation (not conjugate gradient method) is used. Otherwise, conjugate gradient method is used.
    #

    #d = length(xinit)
    d = xinit.size
    lagmultmax = 1e5
    lagmultmin = 1e-4
    lagmultfactor = 2.0
    accuracy_adjustment_exponent = 4/5.
    lagmult = max(min(ilagmult, lagmultmax), lagmultmin)
    was_infeasible = 0
    was_feasible = 0

    #######################################################################
    ## Computation done using direct matrix computation (no conjugate gradient method).
    #######################################################################
    #if strcmp(params.l2solver, 'pseudoinverse')
    if params['l2solver'] == 'pseudoinverse':
        #if strcmp(class(M), 'double') && strcmp(class(Omega), 'double')
        if M.dtype == np.float64 and Omega.dtype == np.float64:
            while True:
                alpha = math.sqrt(lagmult)
                # least-squares solution of [M; alpha*Omega(Lambdahat,:)] * x = [y; 0]
                xhat = np.linalg.lstsq(np.concatenate((M, alpha*Omega[Lambdahat, :])), np.concatenate((y, np.zeros(Lambdahat.size))))[0]
                temp = np.linalg.norm(y - np.dot(M, xhat), 2)
                #disp(['fidelity error=', num2str(temp), ' lagmult=', num2str(lagmult)]);
                if temp <= params['noise_level']:
                    was_feasible = True
                    if was_infeasible:
                        break
                    else:
                        lagmult = lagmult*lagmultfactor
                elif temp > params['noise_level']:
                    was_infeasible = True
                    if was_feasible:
                        xhat = xprev.copy()
                        break
                    lagmult = lagmult/lagmultfactor
                if lagmult < lagmultmin or lagmult > lagmultmax:
                    break
                xprev = xhat.copy()
            arepr = np.dot(Omega[Lambdahat, :], xhat)
            return xhat, arepr, lagmult

    ########################################################################
    ## Computation using conjugate gradient method.
    ########################################################################
    #if strcmp(class(MH),'function_handle')
    if hasattr(MH, '__call__'):
        b = MH(y)
    else:
        b = np.dot(MH, y)

    norm_b = np.linalg.norm(b, 2)
    xhat = xinit.copy()
    xprev = xinit.copy()
    residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b
    direction = -residual
    iter = 0

    while iter < params['max_inner_iteration']:
        iter = iter + 1
        alpha = np.linalg.norm(residual, 2)**2 / np.dot(direction.T, TheHermitianMatrix(direction, M, MH, Omega, OmegaH, Lambdahat, lagmult))
        xhat = xhat + alpha*direction
        prev_residual = residual.copy()
        residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b
        beta = np.linalg.norm(residual, 2)**2 / np.linalg.norm(prev_residual, 2)**2
        direction = -residual + beta*direction

        if np.linalg.norm(residual, 2)/norm_b < params['l2_accuracy']*(lagmult**(accuracy_adjustment_exponent)) or iter == params['max_inner_iteration']:
            #if strcmp(class(M), 'function_handle')
            if hasattr(M, '__call__'):
                temp = np.linalg.norm(y - M(xhat), 2)
            else:
                temp = np.linalg.norm(y - np.dot(M, xhat), 2)

            #if strcmp(class(Omega), 'function_handle')
            if hasattr(Omega, '__call__'):
                u = Omega(xhat)
                u = math.sqrt(lagmult)*np.linalg.norm(u[Lambdahat], 2)
            else:
                u = math.sqrt(lagmult)*np.linalg.norm(np.dot(Omega[Lambdahat, :], xhat), 2)

            #disp(['residual=', num2str(norm(residual,2)), ' norm_b=', num2str(norm_b), ' omegapart=', num2str(u), ' fidelity error=', num2str(temp), ' lagmult=', num2str(lagmult), ' iter=', num2str(iter)]);

            if temp <= params['noise_level']:
                was_feasible = True
                if was_infeasible:
                    break
                else:
                    lagmult = lagmultfactor*lagmult
                    residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b
                    direction = -residual
                    iter = 0
            elif temp > params['noise_level']:
                lagmult = lagmult/lagmultfactor
                if was_feasible:
                    xhat = xprev.copy()
                    break
                was_infeasible = True
                residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b
                direction = -residual
                iter = 0
            if lagmult > lagmultmax or lagmult < lagmultmin:
                break
            xprev = xhat.copy()
        #elseif norm(xprev-xhat)/norm(xhat) < 1e-2
        #    disp(['rel_change=', num2str(norm(xprev-xhat)/norm(xhat))]);
        #    if strcmp(class(M), 'function_handle')
        #        temp = norm(y-M(xhat), 2);
        #    else
        #        temp = norm(y-M*xhat, 2);
        #    end
        #
        #    if temp > 1.2*params.noise_level
        #        was_infeasible = 1;
        #        lagmult = lagmult/lagmultfactor;
        #        xprev = xhat;
        #    end

    #disp(['fidelity_error=', num2str(temp)]);
    print('fidelity_error=', temp)
    #if iter == params['max_inner_iteration']:
    #    disp('max_inner_iteration reached. l2_accuracy not achieved.');

    ##
    # Compute analysis representation for xhat
    ##
    #if strcmp(class(Omega),'function_handle')
    if hasattr(Omega, '__call__'):
        temp = Omega(xhat)
        arepr = temp[Lambdahat]
    else:  ## here Omega is assumed to be a matrix
        arepr = np.dot(Omega[Lambdahat, :], xhat)

    return xhat, arepr, lagmult

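# Note (added): the constrained problem above is handled through the
# unconstrained Lagrangian
#    min_x || y - M*x ||_2^2 + lagmult * || Omega(Lambdahat,:)*x ||_2^2,
# with lagmult repeatedly multiplied/divided by lagmultfactor until the
# fidelity error || y - M*xhat ||_2 straddles params.noise_level.
# A minimal usage sketch on noiseless random data (parameter values are
# arbitrary example choices):
def _demo_argmin_l2_constrained():
    d, p, m = 50, 60, 30
    Omega = Generate_Analysis_Operator(d, p)
    M = rng.randn(m, d)
    y = np.dot(M, rng.randn(d))
    params = {'noise_level': 1e-3, 'max_inner_iteration': 100,
              'l2_accuracy': 1e-6, 'l2solver': 'pseudoinverse'}
    xhat, arepr, lagmult = ArgminOperL2Constrained(y, M, M.T, Omega, Omega.T,
                                                   np.arange(p), np.zeros(d), 1e-4, params)
    print(np.linalg.norm(y - np.dot(M, xhat), 2))    # near or below the noise level
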
##
# This function computes (M'*M + lm*Omega(L,:)'*Omega(L,:)) * x.
##
#function w = TheHermitianMatrix(x, M, MH, Omega, OmegaH, L, lm)
def TheHermitianMatrix(x, M, MH, Omega, OmegaH, L, lm):
    #if strcmp(class(M), 'function_handle')
    if hasattr(M, '__call__'):
        w = MH(M(x))
    else:  ## M and MH are matrices
        w = np.dot(np.dot(MH, M), x)

    if hasattr(Omega, '__call__'):
        v = Omega(x)
        vt = np.zeros(v.size)
        vt[L] = v[L].copy()
        w = w + lm*OmegaH(vt)
    else:  ## Omega is assumed to be a matrix and OmegaH is its conjugate transpose
        w = w + lm*np.dot(np.dot(OmegaH[:, L], Omega[L, :]), x)

    return w

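# Consistency check sketch (added): the matrix path and the function-handle
# path of TheHermitianMatrix should produce the same result.
def _demo_hermitian_matrix():
    d, p, m = 10, 12, 6
    M = rng.randn(m, d)
    Omega = rng.randn(p, d)
    x = rng.randn(d)
    L = np.arange(5)
    w1 = TheHermitianMatrix(x, M, M.T, Omega, Omega.T, L, 0.3)
    w2 = TheHermitianMatrix(x, lambda t: np.dot(M, t), lambda t: np.dot(M.T, t),
                            lambda t: np.dot(Omega, t), lambda t: np.dot(Omega.T, t), L, 0.3)
    print(np.allclose(w1, w2))    # expected: True
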
def GAP(y, M, MH, Omega, OmegaH, params, xinit):
    #function [xhat, Lambdahat] = GAP(y, M, MH, Omega, OmegaH, params, xinit)

    ##
    # [xhat, Lambdahat] = GAP(y, M, MH, Omega, OmegaH, params, xinit)
    #
    # Greedy Analysis Pursuit Algorithm
    # This aims to find an approximate (sometimes exact) solution of
    #    xhat = argmin || Omega * x ||_0   subject to   || y - M * x ||_2 <= epsilon.
    #
    # Outputs:
    #   xhat : estimate of the target cosparse vector x0.
    #   Lambdahat : estimate of the cosupport of x0.
    #
    # Inputs:
    #   y : observation/measurement vector of a target cosparse solution x0,
    #       given by relation y = M * x0 + noise.
    #   M : measurement matrix. This should be given either as a matrix or as a function handle
    #       which implements linear transformation.
    #   MH : conjugate transpose of M.
    #   Omega : analysis operator. Like M, this should be given either as a matrix or as a function
    #       handle which implements linear transformation.
    #   OmegaH : conjugate transpose of Omega.
    #   params : parameters that govern the behavior of the algorithm (mostly).
    #     params.num_iteration : GAP performs this number of iterations.
    #     params.greedy_level : determines how many rows of Omega GAP eliminates at each iteration.
    #                           if the value is < 1, then the rows to be eliminated are determined by
    #                               j : |omega_j * xhat| > greedy_level * max_i |omega_i * xhat|.
    #                           if the value is >= 1, then greedy_level is the number of rows to be
    #                           eliminated at each iteration.
    #     params.stopping_coefficient_size : when the maximum analysis coefficient is smaller than
    #                                        this, GAP terminates.
    #     params.l2solver : legitimate values are 'pseudoinverse' or 'cg'. determines which method
    #                       is used to compute
    #                           argmin || Omega_Lambdahat * x ||_2   subject to   || y - M * x ||_2 <= epsilon.
    #     params.l2_accuracy : when l2solver is 'cg', this determines how accurately the above
    #                          problem is solved.
    #     params.noise_level : this corresponds to epsilon above.
    #   xinit : initial estimate of x0 that GAP will start with. can be zeros(d, 1).
    #
    # Examples:
    #
    # Not particularly interesting:
    # >> d = 100; p = 110; m = 60;
    # >> M = randn(m, d);
    # >> Omega = randn(p, d);
    # >> y = M * x0 + noise;
    # >> params.num_iteration = 40;
    # >> params.greedy_level = 0.9;
    # >> params.stopping_coefficient_size = 1e-4;
    # >> params.l2solver = 'pseudoinverse';
    # >> [xhat, Lambdahat] = GAP(y, M, M', Omega, Omega', params, zeros(d, 1));
    #
    # Assuming that FourierSampling.m, FourierSamplingH.m, FDAnalysis.m, etc. exist:
    # >> n = 128;
    # >> M = @(t) FourierSampling(t, n);
    # >> MH = @(u) FourierSamplingH(u, n);
    # >> Omega = @(t) FDAnalysis(t, n);
    # >> OmegaH = @(u) FDSynthesis(u, n);
    # >> params.num_iteration = 1000;
    # >> params.greedy_level = 50;
    # >> params.stopping_coefficient_size = 1e-5;
    # >> params.l2solver = 'cg';    # in fact, 'pseudoinverse' does not even make sense.
    # >> [xhat, Lambdahat] = GAP(y, M, MH, Omega, OmegaH, params, zeros(d, 1));
    #
    # Above: FourierSampling and FourierSamplingH are conjugate transposes of each other.
    #        FDAnalysis and FDSynthesis are conjugate transposes of each other.
    #        These routines are problem specific and need to be implemented by the user.

    #d = length(xinit(:));
    d = xinit.size

    #if strcmp(class(Omega), 'function_handle')
    #    p = length(Omega(zeros(d,1)));
    #else ## Omega is a matrix
    #    p = size(Omega, 1);
    #end
    if hasattr(Omega, '__call__'):
        p = Omega(np.zeros((d, 1))).size
    else:
        p = Omega.shape[0]

    iter = 0
    lagmult = 1e-4
    #Lambdahat = 1:p;
    Lambdahat = np.arange(p)
    #while iter < params.num_iteration
    while iter < params["num_iteration"]:
        iter = iter + 1
        #[xhat, analysis_repr, lagmult] = ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, lagmult, params);
        xhat, analysis_repr, lagmult = ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, lagmult, params)
        #[to_be_removed, maxcoef] = FindRowsToRemove(analysis_repr, params.greedy_level);
        to_be_removed, maxcoef = FindRowsToRemove(analysis_repr, params["greedy_level"])
        #disp(['** maxcoef=', num2str(maxcoef), ' target=', num2str(params.stopping_coefficient_size), ' rows_remaining=', num2str(length(Lambdahat)), ' lagmult=', num2str(lagmult)]);
        #print '** maxcoef=',maxcoef,' target=',params['stopping_coefficient_size'],' rows_remaining=',Lambdahat.size,' lagmult=',lagmult
        if check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params):
            break

        xinit = xhat.copy()
        #Lambdahat[to_be_removed] = []
        Lambdahat = np.delete(Lambdahat, to_be_removed)

        #n = sqrt(d);
        #figure(9);
        #RR = zeros(2*n, n-1);
        #RR(Lambdahat) = 1;
        #XD = ones(n, n);
        #XD(:, 2:end) = XD(:, 2:end) .* RR(1:n, :);
        #XD(:, 1:(end-1)) = XD(:, 1:(end-1)) .* RR(1:n, :);
        #XD(2:end, :) = XD(2:end, :) .* RR((n+1):(2*n), :)';
        #XD(1:(end-1), :) = XD(1:(end-1), :) .* RR((n+1):(2*n), :)';
        #XD = FD2DiagnosisPlot(n, Lambdahat);
        #imshow(XD);
        #figure(10);
        #imshow(reshape(real(xhat), n, n));

    #return;
    return xhat, Lambdahat

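# Python rendering of the first MATLAB example above (added sketch; the data
# is built with the generators in this file, and the parameter values follow
# that example):
def _demo_gap():
    d, p, m, k = 100, 110, 60, 90
    Omega = Generate_Analysis_Operator(d, p)
    x0, y, M, LambdaMat, realnoise = Generate_Data_Known_Omega(Omega, d, p, m, k, 0.01, 1, 'l0')
    params = {'num_iteration': 40, 'greedy_level': 0.9,
              'stopping_coefficient_size': 1e-4, 'l2solver': 'pseudoinverse',
              'noise_level': np.linalg.norm(realnoise[:, 0], 2)}
    xhat, Lambdahat = GAP(y[:, 0], M, M.T, Omega, Omega.T, params, np.zeros(d))
    # relative recovery error; quality depends on the random draw
    print(np.linalg.norm(x0[:, 0] - xhat) / np.linalg.norm(x0[:, 0]))
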
def FindRowsToRemove(analysis_repr, greedy_level):
    #function [to_be_removed, maxcoef] = FindRowsToRemove(analysis_repr, greedy_level)

    #abscoef = abs(analysis_repr(:));
    abscoef = np.abs(analysis_repr)
    #n = length(abscoef);
    n = abscoef.size
    #maxcoef = max(abscoef);
    maxcoef = abscoef.max()
    if greedy_level >= 1:
        #qq = quantile(abscoef, 1.0-greedy_level/n);
        qq = sp.stats.mstats.mquantiles(abscoef, 1.0 - float(greedy_level)/n, 0.5, 0.5)
    else:
        qq = maxcoef*greedy_level

    #to_be_removed = find(abscoef >= qq);
    to_be_removed = np.nonzero(abscoef >= qq)[0]
    #return;
    return to_be_removed, maxcoef

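# Small illustration (added): greedy_level < 1 thresholds relative to the
# largest coefficient; greedy_level >= 1 selects roughly that many of the
# largest coefficients through an empirical quantile.
def _demo_find_rows_to_remove():
    a = np.array([0.1, 0.9, 0.5, 1.0, 0.05])
    print(FindRowsToRemove(a, 0.8)[0])    # |a_i| >= 0.8*max  ->  [1 3]
    print(FindRowsToRemove(a, 2)[0])      # roughly the 2 largest  ->  [1 3]
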
def check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params):
    #function r = check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params)

    #if isfield(params, 'stopping_coefficient_size') && maxcoef < params.stopping_coefficient_size
    if ('stopping_coefficient_size' in params) and maxcoef < params['stopping_coefficient_size']:
        return 1

    #if isfield(params, 'stopping_lagrange_multiplier_size') && lagmult > params.stopping_lagrange_multiplier_size
    if ('stopping_lagrange_multiplier_size' in params) and lagmult > params['stopping_lagrange_multiplier_size']:
        return 1

    #if isfield(params, 'stopping_relative_solution_change') && norm(xhat-xinit)/norm(xhat) < params.stopping_relative_solution_change
    if ('stopping_relative_solution_change' in params) and np.linalg.norm(xhat-xinit)/np.linalg.norm(xhat) < params['stopping_relative_solution_change']:
        return 1

    #if isfield(params, 'stopping_cosparsity') && length(Lambdahat) < params.stopping_cosparsity
    if ('stopping_cosparsity' in params) and Lambdahat.size < params['stopping_cosparsity']:
        return 1

    return 0
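
# Minimal driver (added sketch): run the illustrative demos when the module
# is executed directly; harmless on import.
if __name__ == "__main__":
    _demo_generate_analysis_operator()
    _demo_generate_data()
    _demo_argmin_l2_constrained()
    _demo_hermitian_matrix()
    _demo_find_rows_to_remove()
    _demo_gap()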