annotate tests/omp_test.py @ 2:735a0e24575c

Organized folders: added tests, apps, matlab, docs folders. Added __init__.py files
author nikcleju
date Fri, 21 Oct 2011 13:53:49 +0000
parents
children
rev   line source
nikcleju@2 1 """
nikcleju@2 2 #=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
nikcleju@2 3 # Bob L. Sturm <bst@create.aau.dk> 20111018
nikcleju@2 4 # Department of Architecture, Design and Media Technology
nikcleju@2 5 # Aalborg University Copenhagen
nikcleju@2 6 # Lautrupvang 15, 2750 Ballerup, Denmark
nikcleju@2 7 #=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#
nikcleju@2 8 """
nikcleju@2 9
nikcleju@2 10 import unittest
nikcleju@2 11
nikcleju@2 12 import numpy as np
nikcleju@2 13 from sklearn.utils import check_random_state
nikcleju@2 14 import time
nikcleju@2 15
nikcleju@2 16 from omp_sk_bugfix import orthogonal_mp
nikcleju@2 17 from omp_QR import greed_omp_qr
nikcleju@2 18 from omp_QR import omp_qr
nikcleju@2 19
nikcleju@2 20 """
nikcleju@2 21 Run a problem suite involving sparse vectors in
nikcleju@2 22 ambientDimension dimensional space, with a resolution
nikcleju@2 23 in the phase plane of numGradations x numGradations,
nikcleju@2 24 and at each indeterminacy and sparsity pair run
nikcleju@2 25 numTrials independent trials.
nikcleju@2 26
nikcleju@2 27 Outputs a text file denoting successes at each phase point.
nikcleju@2 28 For more on phase transitions, see:
nikcleju@2 29 D. L. Donoho and J. Tanner, "Precise undersampling theorems,"
nikcleju@2 30 Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010.
nikcleju@2 31 """
nikcleju@2 32
class CompareResults(unittest.TestCase):
    """Regression test: the three OMP implementations must agree."""

    def testCompareResults(self):
        """OMP results should be almost the same with all implementations"""
        # Phase-transition sweep over a 30x30 (delta, rho) grid in a
        # 400-dimensional ambient space, one trial per grid point.
        # runProblemSuite raises ValueError if the solvers disagree.
        runProblemSuite(
            ambientDimension=400,
            numGradations=30,
            numTrials=1,
            verbose=False,
        )
nikcleju@2 41
nikcleju@2 42
nikcleju@2 43
def runProblemSuite(ambientDimension, numGradations, numTrials, verbose):
    """Run an OMP phase-transition suite and cross-check three solvers.

    For each (delta, rho) point on a numGradations x numGradations grid
    (delta = undersampling ratio m/N, rho = sparsity ratio k/m), generate
    numTrials sparse recovery problems and solve each with three OMP
    implementations (Bob's ``omp_qr``, Nic's ``greed_omp_qr`` and
    scikit-learn's ``orthogonal_mp``).  See Donoho & Tanner, "Precise
    undersampling theorems," Proc. IEEE 98(6), 2010, for phase transitions.

    Parameters
    ----------
    ambientDimension : int
        Dimension N of the sparse signal.
    numGradations : int
        Grid resolution along each phase axis.
    numTrials : int
        Independent trials per grid point.
    verbose : bool
        If True, print progress, per-trial diffs and cumulative timings.

    Raises
    ------
    ValueError
        If any two implementations produce solutions differing by more
        than 1e-6 in Euclidean norm.
    """
    idx = np.arange(ambientDimension)
    phaseDelta = np.linspace(0.05, 1, numGradations)
    phaseRho = np.linspace(0.05, 1, numGradations)
    success = np.zeros((numGradations, numGradations))

    # Cumulative wall-clock timers for the three implementations.
    t1all = 0  # Bob's omp_qr
    t2all = 0  # Nic's greed_omp_qr
    t3all = 0  # scikit-learn orthogonal_mp

    deltaCounter = 0
    # delta is number of measurements / ambient dimension
    # NOTE(review): only the first 17 delta gradations are swept, as in the
    # original code — presumably to bound runtime; confirm before extending.
    for delta in phaseDelta[:17]:
        rhoCounter = 0
        for rho in phaseRho:
            if verbose:
                print(deltaCounter, rhoCounter)

            numMeasurements = int(delta * ambientDimension)
            sparsity = int(rho * numMeasurements)
            # NOTE(review): fixed seed means every grid point reuses the
            # same random state ("how do I set the following to be random
            # each time?" in the original) — kept for reproducibility.
            generator = check_random_state(100)
            # Create a dictionary with unit-norm columns.
            D = generator.randn(numMeasurements, ambientDimension)
            D /= np.sqrt(np.sum((D ** 2), axis=0))
            # Precompute the Gramian once per grid point (for efficiency).
            DTD = np.dot(D.T, D)

            successCounter = 0
            trial = numTrials
            while trial > 0:
                # Generate a sparse signal with a minimum non-zero value.
                x = np.zeros((ambientDimension, 1))
                # Bug fix: shuffle a copy — the original aliased idx and
                # mutated the shared index array in place on every trial.
                idx2 = idx.copy()
                generator.shuffle(idx2)
                idx3 = idx2[:sparsity]
                # Redraw until every support entry is clearly non-zero.
                while np.min(np.abs(x[idx3, 0])) < 1e-10:
                    x[idx3, 0] = generator.randn(sparsity)
                # Sense the sparse signal.
                y = np.dot(D, x)

                # Nic: sparsify OMP function (translated from Matlab),
                # stopped after 2*sparsity iterations.
                ompopts = {'stopCrit': 'M', 'stopTol': 2 * sparsity}
                starttime = time.time()  # start timer
                x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts)
                t2all = t2all + time.time() - starttime  # stop timer
                idx_r2 = np.nonzero(x_r2)[0]

                # Run to two times the expected sparsity, or tolerance.
                # Why? OMP can often retrieve the correct solution when
                # run for more than the expected sparsity.
                # Nic: tolerance adjusted to match the other functions.
                starttime = time.time()  # start timer
                x_r, idx_r = omp_qr(y.copy(), D.copy(), DTD.copy(), 2 * sparsity, numMeasurements * 1e-14 / np.vdot(y, y))
                t1all = t1all + time.time() - starttime  # stop timer

                # Nic: scikit-learn OMP on the same problem.
                starttime = time.time()  # start timer
                x_r3 = orthogonal_mp(D.copy(), y.copy(), 2 * sparsity, tol=numMeasurements * 1e-14, precompute_gram=False, copy_X=True)
                idx_r3 = np.nonzero(x_r3)[0]
                t3all = t3all + time.time() - starttime  # stop timer

                # Nic: compare results.
                # Bug fix: Python-2 print statements converted to print()
                # calls — the file already used print() above, and the old
                # statements were a SyntaxError under Python 3.
                if verbose:
                    print('diff1 = ', np.linalg.norm(x_r.squeeze() - x_r2.squeeze()))
                    print('diff2 = ', np.linalg.norm(x_r.squeeze() - x_r3.squeeze()))
                    print('diff3 = ', np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()))
                    print("Bob's total time = ", t1all)
                    print("Nic's total time = ", t2all)
                    print("Skl's total time = ", t3all)
                if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-6 or \
                   np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-6 or \
                   np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-6:
                    if verbose:
                        print("STOP: Different results")
                        print("Bob's residual: ||y - D x_r ||_2 = ", np.linalg.norm(y.squeeze() - np.dot(D, x_r).squeeze()))
                        print("Nic's residual: ||y - D x_r ||_2 = ", np.linalg.norm(y.squeeze() - np.dot(D, x_r2).squeeze()))
                        print("Skl's residual: ||y - D x_r ||_2 = ", np.linalg.norm(y.squeeze() - np.dot(D, x_r3).squeeze()))
                    raise ValueError("Different results")

                # Debias: zero out numerically negligible entries.
                for nn in idx_r:
                    if abs(x_r[nn]) < 1e-10:
                        x_r[nn] = 0

                # Exact recovery condition using support:
                # if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)):
                #     successCounter += 1
                # Exact recovery condition using error in the solution:
                error = x - x_r
                """ the following is the exact recovery condition in: A. Maleki
                and D. L. Donoho, "Optimally tuned iterative reconstruction
                algorithms for compressed sensing," IEEE J. Selected Topics
                in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """
                if np.vdot(error, error) < np.vdot(x, x) * 1e-4:
                    successCounter += 1
                trial -= 1

            success[rhoCounter, deltaCounter] = successCounter
            # No success at this rho: higher sparsities will only be harder,
            # so skip the rest of this delta column.
            if successCounter == 0:
                break

            rhoCounter += 1
        # np.savetxt('test.txt', success, fmt='#2.1d', delimiter=',')
        deltaCounter += 1
nikcleju@2 152
if __name__ == "__main__":
    # Run the test case via the unittest CLI entry point; verbosity=2
    # prints each test method's docstring as it runs.
    unittest.main(verbosity=2)