annotate experiment-reverb/code/supervised_training_pca-bak.py @ 2:c87a9505f294 tip

Added LICENSE for code, removed .wav files
author Emmanouil Theofanis Chourdakis <e.t.chourdakis@qmul.ac.uk>
date Sat, 30 Sep 2017 13:25:50 +0100
parents 246d5546657c
children
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 11:53:17 2015

@author: mmxgn
"""

# This file does the cluster estimation and the removal of outliers

from sys import argv, exit
from essentia.standard import YamlInput, YamlOutput
from essentia import Pool
from pca import *

from numpy import *
from sklearn import cluster
from sklearn.metrics import pairwise_distances
from sklearn.cluster import KMeans, MiniBatchKMeans
import matplotlib.pyplot as plt
from sklearn.mixture import GMM
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from scipy.signal import decimate
#from hmmlearn import hmm
# Note: sklearn.hmm was removed in later scikit-learn releases; the
# hmmlearn import above is its replacement.
from sklearn import hmm
#from adpcm import adm, adm_reconstruct

mse = lambda A, B: ((array(A) - array(B)) ** 2).mean()
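# Example: mse([0, 0], [1, 2]) == 2.5 (mean of the elementwise squared errors).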


def adm_reconstruct(codeword, h, dmin=.01, dmax=.28):
    # Reconstruct a signal from an ADM code vector. h is the initial
    # value; the step size adapts between dmin and dmax, doubling while
    # the code keeps moving in the same direction.
    x = zeros((1, codeword.shape[1]))

    delta1 = dmin
    delta2 = dmin
    Sum = h

    x[0] = h
    for i in range(0, codeword.shape[1]):
        if codeword[0, i] == 0:
            delta1 = dmin
            delta2 = dmin
        elif codeword[0, i] == 1:
            delta2 = dmin
            Sum += delta1
            delta1 *= 2
            if delta1 > dmax:
                delta1 = dmax
        elif codeword[0, i] == -1:
            delta1 = dmin
            Sum -= delta2
            delta2 *= 2
            if delta2 > dmax:
                delta2 = dmax
        x[0, i] = Sum
    return x


def adm(x, dmin=.01, dmax=.28, tol=0.0001):
    # Adaptive delta modulation, adapted from adeltamod.m
    # by Gandhar Desai (gdesai), BITS Pilani Goa Campus, 28 Sept 2013.
    # Returns the staircase approximation, the {-1, 0, +1} code vector,
    # and the first sample (needed by adm_reconstruct).

    xsig = x
    Lx = len(x)

    ADMout = zeros((1, Lx))
    codevec = zeros((1, Lx))

    Sum = x[0]
    delta1 = dmin
    delta2 = dmin
    mult1 = 2
    mult2 = 2
    for i in range(0, Lx):
        if abs(xsig[i] - Sum) < tol:
            # Within tolerance: hold the estimate and reset both step sizes.
            bit = 0
            delta2 = dmin
            delta1 = dmin
        elif xsig[i] >= Sum:
            # Signal above the estimate: step up with a growing step size.
            bit = 1
            delta2 = dmin
            Sum += delta1
            delta1 *= mult1
            if delta1 > dmax:
                delta1 = dmax
        else:
            # Signal below the estimate: step down with a growing step size.
            bit = -1
            delta1 = dmin
            Sum -= delta2
            delta2 *= mult2
            if delta2 > dmax:
                delta2 = dmax

        ADMout[0, i] = Sum
        codevec[0, i] = bit

    return ADMout, codevec, x[0]
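

# A minimal round-trip sketch of the ADM pair above (illustrative only; it is
# not called anywhere in this script, and the dmin/dmax/tol values are just
# assumptions): encode a sine, rebuild it from the code vector and the first
# sample, and report the reconstruction error.
def _adm_roundtrip_demo():
    t = linspace(0, 1, 200)
    signal = 0.5 * sin(2 * pi * t)
    staircase, code, first = adm(signal, dmin=0.001, dmax=0.28, tol=0.001)
    rebuilt = adm_reconstruct(code, first, dmin=0.001, dmax=0.28)
    print "[II] ADM round-trip MSE vs input: %.6f" % mse(signal, rebuilt.flatten())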

if __name__ == "__main__":
    if len(argv) != 2:
        print "[EE] Wrong number of arguments"
        print "[II] Correct syntax is:"
        print "[II] \t%s <training_file>" % argv[0]
        print "[II] where <training_file> is a .yaml file containing the"
        print "[II] features of the dataset (try output2_stage/fulltraining-last.yaml)"
        exit(-1)

    infile = argv[1]

    features_pool = YamlInput(filename=infile)()

    feature_captions = features_pool.descriptorNames()
    parameter_captions = []

    # Split the descriptors: 'parameter.*' names go to the parameter list,
    # and 'metadata.*'/'parameter.*' names are dropped from the features.
    for c in features_pool.descriptorNames():
        if c.split('.')[0] == 'parameter':
            parameter_captions.append(c)
        if c.split('.')[0] == 'metadata' or c.split('.')[0] == 'parameter':
            feature_captions.remove(c)

    plt.close('all')

    print "[II] Loaded training data from %s (%s) " % (infile, features_pool['metadata.date'][0])
    print "[II] %d Features Available: " % len(feature_captions)

    print str(feature_captions).replace("', ", "\n").replace('[', '').replace("'", "[II]\t ")[:-7]
    nfeatures_in = len(feature_captions)
    nparameters_in = len(parameter_captions)

    features_vector = zeros((nfeatures_in, len(features_pool[feature_captions[0]])))
    parameters_vector = zeros((nparameters_in, len(features_pool[parameter_captions[0]])))

    for i in range(0, nfeatures_in):
        features_vector[i, :] = features_pool[feature_captions[i]].T
    for i in range(0, nparameters_in):
        parameters_vector[i, :] = features_pool[parameter_captions[i]].T

    print "[II] %d parameters used:" % len(parameter_captions)
    print str(parameter_captions).replace("', ", "\n").replace('[', '').replace("'", "[II]\t ")[:-7].replace('parameter.', '')

    print "[II] Marking silent parts"

    silent_parts = zeros((1, len(features_pool[feature_captions[0]].T)))

    rms = features_vector[feature_captions.index('rms'), :]

    # Hysteresis gate on the RMS envelope: the high threshold is 10% of the
    # mean RMS and the low threshold is 1% of it. A frame is marked silent
    # when the level falls below the low threshold and stays silent until
    # it rises above the high threshold again.

    rms_threshold_mean = mean(rms)
    rms_threshold_max = max(rms)
    rms_threshold_min = min(rms)

    rms_threshold_high = 0.1 * rms_threshold_mean
    rms_threshold_low = 0.01 * rms_threshold_mean

    for n in range(1, len(rms)):
        prev = rms[n - 1]
        curr = rms[n]

        if prev >= rms_threshold_high:
            if curr < rms_threshold_low:
                silent_parts[0, n] = 1
            else:
                silent_parts[0, n] = 0
        elif prev <= rms_threshold_low:
            if curr > rms_threshold_high:
                silent_parts[0, n] = 0
            else:
                silent_parts[0, n] = 1
        else:
            silent_parts[0, n] = silent_parts[0, n - 1]

    # Backfill the first frame from the decision made for the second one.
    if silent_parts[0, 1] == 1:
        silent_parts[0, 0] = 1
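
    # A reusable sketch of the same idea (illustrative only; the script uses
    # the inline loop above). This is the textbook formulation, driven by the
    # current sample rather than the previous one.
    def hysteresis_silence_mask(envelope, low, high):
        mask = zeros(len(envelope))
        for n in range(1, len(envelope)):
            if envelope[n] < low:
                mask[n] = 1
            elif envelope[n] > high:
                mask[n] = 0
            else:
                mask[n] = mask[n - 1]
        if len(envelope) > 1:
            mask[0] = mask[1]
        return mask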

    # plot(rms)
    # plot(silent_parts.T)
    # plot(ones((len(rms), 1)) * rms_threshold_high)
    # plot(ones((len(rms), 1)) * rms_threshold_low)

    active_index = invert(silent_parts.flatten().astype(bool))

    # Keep only the active (non-silent) parts.
    # Uncomment these to drop the silent frames:
    #features_vector = features_vector[:, active_index]
    #parameters_vector = parameters_vector[:, active_index]

    moments_vector = zeros((features_vector.shape[0], 2))

    print "[II] Storing moments vector"
    # Standardize each feature to zero mean and unit variance, keeping the
    # original mean and standard deviation so the scaling can be undone.
    for i in range(0, features_vector.shape[0]):
        mean_ = mean(features_vector[i, :])
        std_ = std(features_vector[i, :], ddof=1)
        moments_vector[i, 0] = mean_
        moments_vector[i, 1] = std_

        features_vector[i, :] = (features_vector[i, :] - mean_) / std_
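    # The standardization can be undone per feature later with:
    #   features_vector[i, :] * moments_vector[i, 1] + moments_vector[i, 0]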

    print "[II] Extracting PCA configuration "

    kernel, q, featurelist = extract_pca_configuration_from_data(features_vector)

    print "[II] Optimal number of PCs to keep: %d" % q

    feature_captions_array = array(feature_captions)

    features_to_keep = list(feature_captions_array[featurelist])
    print "[II] Decided to keep %d features:" % len(features_to_keep)
    print str(features_to_keep).replace("', ", "\n").replace('[', '').replace("'", "[II]\t ")[:-7]

    # Keep the desired features.
    # Uncomment this to restrict the data to the selected features:
    #features_kept_data = features_vector[featurelist, :]
    features_kept_data = features_vector

    # Project the standardized features onto the first q principal
    # components; each column of features_vector becomes a q-dimensional
    # PCA-domain observation.
    features_vector = (kernel.T * features_kept_data)[0:q, :]
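
    # Note (assumption, depends on the pca module's conventions): if the
    # columns of kernel are orthonormal principal directions, the projection
    # can be approximately inverted with
    #   features_kept_data ~= kernel[:, 0:q] * features_vector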

    # Generate the parameter clusters using k-means.
    # parameters_k_means = KMeans(n_clusters=parameters_k, init='k-means++', max_iter=300, tol=0.0000001, verbose=1)
    parameters_k_means = KMeans(init='k-means++', n_init=10, max_iter=300, tol=0.0000001, verbose=0)
    # parameters_k_means = MiniBatchKMeans(init='k-means++', max_iter=300, tol=0.00001, verbose=1)

    # Quantize the differences of the parameters instead of the parameters
    # themselves (disabled):
    # parameters_vector_diff = concatenate((zeros((shape(parameters_vector)[0], 1)), diff(parameters_vector, axis=1)), axis=1)
    # features_vector_diff = concatenate((zeros((shape(features_vector)[0], 1)), diff(features_vector, axis=1)), axis=1)
    # Delete this afterwards:
    # features_vector = features_vector_diff
    # parameters_k_means.fit(parameters_vector_diff.T)

    parameters_k_means.fit(parameters_vector.T)

    parameters_k_means_centers = parameters_k_means.cluster_centers_
    parameters_k_means_labels = parameters_k_means.labels_

    # Quantize every parameter frame to its cluster centroid.
    parameters_vector_estimated = zeros(shape(parameters_vector))

    for n in range(0, len(parameters_vector_estimated[0])):
        parameters_vector_estimated[:, n] = parameters_k_means_centers[parameters_k_means_labels[n]]

    # plot(parameters_vector[0])
    # plot(parameters_vector_estimated[0])

    # PROBLEM HERE
    print "[II] Parameters MSE for %d clusters: %.3f" % (len(parameters_k_means.cluster_centers_), mse(parameters_vector, parameters_vector_estimated))
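
    # Note: no n_clusters is passed above, so scikit-learn's KMeans falls
    # back to its default of 8 clusters; the commented-out call sketches the
    # intended parameters_k variant.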

    # ------------------------------------------------------------------
    # The remainder is a disabled experiment: GMM feature clustering, a
    # Gaussian naive Bayes sanity check, ADM coding of the upsampled
    # parameters, and an HMM decoder over the ADM code states.
    # ------------------------------------------------------------------

    # print "[II] Clustering features."
    #
    # n_clusters = 100
    # features_clustering = GMM(n_components=n_clusters, covariance_type='diag')
    #
    # features_clustering.fit(features_vector.T, y=parameters_k_means_labels)
    #
    # features_clustering_means = features_clustering.means_
    # features_clustering_labels = features_clustering.predict(features_vector.T)
    # features_clustering_sigmas = features_clustering.covars_
    #
    # features_vector_estimated = zeros(shape(features_vector))
    #
    # for n in range(0, len(features_vector_estimated[0])):
    #     features_vector_estimated[:, n] = features_clustering_means[features_clustering_labels[n]]
    #
    # # for n in range(0, features_vector.shape[0]):
    # #     hist(features_vector[1] - features_vector_estimated[1], 256)
    # std(features_vector[1] - features_vector_estimated[1], ddof=1)
    # mean(features_vector[1] - features_vector_estimated[1])
    #
    # print "[II] Features MSE for %d clusters: %.3f" % (n_clusters, mse(features_vector, features_vector_estimated))
    #
    # print "[II] Testing Gaussian Naive Bayes Classifier"
    #
    # gnb = GaussianNB()
    # gnb.fit(features_vector.T, parameters_k_means_labels)
    # parameter_labels_estimated = gnb.predict(features_vector.T)
    #
    # print "[II] Raw Parameters - Gaussian Naive Bayes classifier testing ratio: %.4f" % (float(sum((parameter_labels_estimated == parameters_k_means_labels).astype(float))) / float(len(parameters_k_means_labels)))
    #
    # print "[II] Trying ADM-coded parameters"
    # UpsamplingFactor = 100
    # print "[II] Upsampling features and parameters by a factor of %d" % UpsamplingFactor
    #
    # # Upsampled features and parameters
    # features_vector_upsampled = repeat(features_vector, UpsamplingFactor, axis=1)
    # feature_labels_upsampled = repeat(features_clustering_labels, UpsamplingFactor, axis=0)
    # parameters_vector_upsampled = repeat(parameters_vector, UpsamplingFactor, axis=1)
    #
    # parameters_vector_upsampled_adm = matrix(zeros(shape(parameters_vector_upsampled)))
    # parameters_vector_upsampled_code = matrix(zeros(shape(parameters_vector_upsampled)))
    # parameters_vector_upsampled_firstval = matrix(zeros((parameters_vector_upsampled.shape[0], 1)))
    #
    # # Reconstructed parameters
    # parameters_vector_upsampled_reconstructed = matrix(zeros(shape(parameters_vector_upsampled)))
    #
    # dmin = 0.001
    # dmax = 0.28
    # tol = 0.001
    # for i in range(0, parameters_vector_upsampled.shape[0]):
    #     out, code, h = adm(parameters_vector_upsampled[i, :], dmin=dmin, dmax=dmax, tol=tol)
    #     parameters_vector_upsampled_adm[i, :] = out
    #     parameters_vector_upsampled_code[i, :] = code
    #     parameters_vector_upsampled_firstval[i, 0] = h
    #
    #     # Reconstruct-ADM
    #     parameters_vector_upsampled_reconstructed[i, :] = adm_reconstruct(parameters_vector_upsampled_code[i, :], parameters_vector_upsampled_firstval[i, 0], dmin=dmin, dmax=dmax)
    #
    # # plot(parameters_vector_upsampled_adm.T, 'r--')
    # # plot(parameters_vector_upsampled.T)
    # # plot(parameters_vector_upsampled_reconstructed.T, 'g.')
    #
    # parameters_vector_reconstructed = zeros(shape(parameters_vector))
    # for n in range(0, parameters_vector.shape[1]):
    #     parameters_vector_reconstructed[:, n] = parameters_vector_upsampled_reconstructed[:, n * UpsamplingFactor]
    #
    # mse_adm = mse(parameters_vector_reconstructed, parameters_vector)
    #
    # print "[II] Expected ADM reconstruction MSE: %.4f" % mse_adm
    #
    # # figure()
    # # plot(parameters_vector.T)
    # # plot(parameters_vector_reconstructed.T)
    #
    # print "[II] Building HMM transition, emission matrices and priors"
    #
    # transmat = zeros((3, 3))
    # startprob = zeros((3,))
    # emissionmat = zeros((3, n_clusters))
    #
    # state_labels = parameters_vector_upsampled_code + 1
    # stateseq = state_labels.T
    #
    # for n in range(0, shape(parameters_vector_upsampled_code)[1]):
    #     if n > 0:
    #         transmat[state_labels[0, n - 1], state_labels[0, n]] += 1
    #     startprob[state_labels[0, n]] += 1
    #     emissionmat[state_labels[0, n], feature_labels_upsampled[n]] += 1
    #
    # for n in range(0, transmat.shape[0]):
    #     transmat[n, :] /= sum(transmat[n, :])
    #     emissionmat[n, :] /= sum(emissionmat[n, :])
    #
    # transmat = matrix(transmat)
    # emissionmat = matrix(emissionmat)
    # # Prior
    # startprob = startprob / sum(startprob)
    # startprob = ravel(startprob)
    #
    # # Vocabulary
    #
    # model = hmm.GMMHMM(n_mix=n_clusters, n_components=3, covariance_type="diag")
    # model.means_ = features_clustering.means_
    # model.covars_ = features_clustering.covars_
    #
    # features_vector_array = array(features_vector)
    # features_vector_upsampled_array = array(features_vector_upsampled)
    #
    # model.fit([features_vector_array.T])
    # stateseq_estimated = model.predict(features_vector_upsampled_array.T)
    #
    # parameters_vector_upsampled_reconstructed_decoded = matrix(zeros(shape(parameters_vector_upsampled)))
    #
    # plot(stateseq_estimated)
    # plot(stateseq)
    #
    # code_estimated = matrix(zeros(shape(parameters_vector_upsampled)))
    # code_estimated[0, :] = stateseq_estimated - 1
    #
    # parameters_vector_upsampled_reconstructed_estimated = matrix(zeros(shape(parameters_vector_upsampled)))
    #
    # for i in range(0, parameters_vector_upsampled.shape[0]):
    #     parameters_vector_upsampled_reconstructed_estimated[i, :] = adm_reconstruct(code_estimated, parameters_vector_upsampled_firstval[i, 0], dmin=dmin, dmax=dmax)
    # figure()
    # plot(parameters_vector_upsampled_reconstructed_estimated.T)
    # plot(parameters_vector_upsampled.T)