#!/usr/bin/env python
# encoding: utf-8
"""
SegEval.py

The main segmentation program.

Created by mi tian on 2015-04-02.
Copyright (c) 2015 __MyCompanyName__. All rights reserved.
"""

# Load standard python libs
import sys, os, optparse, csv
from itertools import combinations
from os.path import join, isdir, isfile, abspath, dirname, basename, split, splitext
from copy import copy

import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy as sp
from scipy.signal import correlate2d, convolve2d, filtfilt, resample
from scipy.ndimage.filters import *
from sklearn.decomposition import PCA
from sklearn.mixture import GMM
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_distances

# Load dependencies
from utils.SegUtil import getMean, getStd, getDelta, getSSM, reduceSSM, upSample, normaliseFeature
from utils.PeakPickerUtil import PeakPicker
from utils.gmmdist import *
from utils.GmmMetrics import GmmDistance
from utils.RankClustering import rClustering
from utils.kmeans import Kmeans
from utils.PathTracker import PathTracker

# Load boundary retrieval utilities
import cnmf as cnmf_S
import foote as foote_S
import sf as sf_S
import fmc2d as fmc2d_S
import novelty as novelty_S

# Algorithm params
# C-NMF
h = 8            # Size of the median filter for the features in C-NMF
R = 15           # Size of the median filter for the activation matrix in C-NMF
rank = 4         # Rank of the decomposition for the boundaries
rank_labels = 6  # Rank of the decomposition for the labels
R_labels = 6     # Size of the median filter for the labels
# Foote
M = 2            # Median filter for the audio features (in beats)
Mg = 32          # Gaussian kernel size
L = 16           # Size of the median filter for the adaptive threshold
# 2D-FMC
N = 8            # Size of the fixed-length segments (for 2D-FMC)


# Define arg parser
def parse_args():
    op = optparse.OptionParser()
    # IO options
    op.add_option('-g', '--gammatonegram-features', action="store", dest="GF", default='/Volumes/c4dm-03/people/mit/features/gammatonegram/qupujicheng/2048', type="str", help="Loading gammatone features from..")
    op.add_option('-s', '--spectrogram-features', action="store", dest="SF", default='/Volumes/c4dm-03/people/mit/features/spectrogram/qupujicheng/2048', type="str", help="Loading spectral features from..")
    op.add_option('-t', '--tempogram-features', action="store", dest="TF", default='/Volumes/c4dm-03/people/mit/features/tempogram/qupujicheng/tempo_features_6s', type="str", help="Loading tempogram features from..")
    op.add_option('-f', '--featureset', action="store", dest="FEATURES", default='[0, 1, 2, 3]', type="str", help="Choose feature subsets (input a list of integers) used for segmentation -- gammatone, chroma, timbre, tempo -- 0, 1, 2, 3.")
    op.add_option('-a', '--annotations', action="store", dest="GT", default='/Volumes/c4dm-03/people/mit/annotation/qupujicheng/lowercase', type="str", help="Loading annotation files from..")
    op.add_option('-o', '--output', action="store", dest="OUTPUT", default='/Volumes/c4dm-03/people/mit/segmentation/gammatone/qupujicheng', type="str", help="Write segmentation results to..")

    # Boundary retrieval options
    op.add_option('-b', '--boundary-method', action="store", dest="BOUNDARY", type='choice', choices=['novelty', 'cnmf', 'foote', 'sf'], default='novelty', help="Choose boundary retrieval algorithm ('novelty', 'cnmf', 'foote', 'sf').")
    op.add_option('-l', '--labeling-method', action="store", dest="LABEL", type='choice', choices=['cnmf', 'fmc2d'], default='cnmf', help="Choose boundary labeling algorithm ('cnmf', 'fmc2d').")

    # Plot/print/mode options
    op.add_option('-p', '--plot', action="store_true", dest="PLOT", default=False, help="Save plots")
    op.add_option('-e', '--test-mode', action="store_true", dest="TEST", default=False, help="Test mode")
    op.add_option('-v', '--verbose-mode', action="store_true", dest="VERBOSE", default=False, help="Print results in verbose mode.")

    return op.parse_args()

options, args = parse_args()

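# Example invocation (illustrative only; the default paths above point to the author's
# feature and annotation directories and need to be adapted to the local setup):
#   python SegEval.py -b novelty -l cnmf -f '[0, 1, 2, 3]' -a <annotation_dir> -o <output_dir>
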
class FeatureObj():
    __slots__ = ['key', 'audio', 'timestamps', 'gammatone_features', 'tempo_features', 'timbre_features', 'harmonic_features', 'gammatone_ssm', 'tempo_ssm', 'timbre_ssm', 'harmonic_ssm', 'ssm_timestamps']

class AudioObj():
    __slots__ = ['name', 'feature_list', 'gt', 'label', 'gammatone_features', 'tempo_features', 'timbre_features', 'harmonic_features', 'combined_features',
                 'gammatone_ssm', 'tempo_ssm', 'timbre_ssm', 'harmonic_ssm', 'combined_ssm', 'ssm', 'ssm_timestamps', 'tempo_timestamps']

class EvalObj():
    __slots__ = ['TP', 'FP', 'FN', 'P', 'R', 'F', 'AD', 'DA']


class SSMseg(object):
    '''The main segmentation object.'''

    def __init__(self):
        self.SampleRate = 44100
        self.NqHz = self.SampleRate / 2
        self.timestamp = []
        self.previousSample = 0.0
        self.featureWindow = 6.0
        self.featureStep = 3.0
        self.kernel_size = 64  # Adjust this param according to the feature resolution.
        self.blockSize = 2048
        self.stepSize = 1024

        '''NOTE: Match the following params with those used for feature extraction!'''

        '''NOTE: Unlike the spectrogram ones, gammatone features are extracted without taking an FFT. The windowing is done for the purpose of
        chunking the audio to facilitate the gammatone filtering with the specified blockSize and stepSize. The resulting gammatonegram is
        aggregated every gammatoneLen samples without overlap.'''
        self.gammatoneLen = 2048
        self.gammatoneBandGroups = [0, 2, 6, 10, 13, 17, 20]
        self.nGammatoneBands = 20
        self.lowFreq = 100
        self.highFreq = self.SampleRate / 4

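        # With SampleRate = 44100 and gammatoneLen = 2048, the aggregated gammatonegram has
        # a frame rate of roughly 44100 / 2048 ~= 21.5 frames per second, so the 6 s
        # featureWindow above corresponds to roughly 130 gammatonegram frames.
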
        '''Settings for extracting tempogram features.'''
        self.tempoWindow = 6.0
        self.bpmBands = [30, 45, 60, 80, 100, 120, 180, 240, 400, 600]

        '''Peak picking settings.'''
        self.threshold = 50
        self.confidence_threshold = 0.5
        self.delta_threshold = 0.0
        self.backtracking_threshold = 1.9
        self.polyfitting_on = True
        self.medfilter_on = True
        self.LPfilter_on = True
        self.whitening_on = False
        self.aCoeffs = [1.0000, -0.5949, 0.2348]
        self.bCoeffs = [0.1600, 0.3200, 0.1600]
        self.cutoff = 0.34
        self.medianWin = 7


    def pairwiseF(self, annotation, detection, tolerance=3.0, combine=1.0):
        '''Pairwise F-measure evaluation of detection rates.'''

        # print 'detection', detection
        # Append the final annotated boundary (end of piece) to the detections.
        detection = np.append(detection, annotation[-1])
        res = EvalObj()
        res.TP = 0            # Total number of matched ground truth and experimental data points
        gt = len(annotation)  # Total number of ground truth data points
        dt = len(detection)   # Total number of experimental data points
        foundIdx = []
        D_AD = np.zeros(gt)   # Distance from each annotation to the nearest detection
        D_DA = np.zeros(dt)   # Distance from each detection to the nearest annotation

        for dtIdx in xrange(dt):
            D_DA[dtIdx] = np.min(abs(detection[dtIdx] - annotation))
        for gtIdx in xrange(gt):
            D_AD[gtIdx] = np.min(abs(annotation[gtIdx] - detection))
            for dtIdx in xrange(dt):
                if (annotation[gtIdx] >= detection[dtIdx] - tolerance / 2.0) and (annotation[gtIdx] <= detection[dtIdx] + tolerance / 2.0):
                    res.TP = res.TP + 1.0
                    foundIdx.append(gtIdx)
        foundIdx = list(set(foundIdx))
        res.TP = len(foundIdx)
        res.FP = max(0, dt - res.TP)
        res.FN = max(0, gt - res.TP)

        res.AD = np.mean(D_AD)
        res.DA = np.mean(D_DA)

        res.P, res.R, res.F = 0.0, 0.0, 0.0

        if res.TP == 0:
            return res

        res.P = res.TP / float(dt)
        res.R = res.TP / float(gt)
        res.F = 2 * res.P * res.R / (res.P + res.R)
        return res


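    # Illustrative sanity check for pairwiseF (values chosen here, not from any experiment):
    # with annotation = [0.0, 10.0, 20.0] and detection = [0.2, 15.0], the final annotation
    # (20.0) is appended to the detections, two annotations are matched within the default
    # 3 s tolerance (TP = 2, gt = 3, dt = 3), giving P = R = F = 2/3.
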
    def process(self):
        '''For the aggregated input features, discard a proportion of the features each time, in descending order of the pairwise
        distances within the feature space. Meanwhile, evaluate the segmentation result and track how the performance changes by
        measuring the feature selection threshold versus segmentation F-measure curve.
        '''

        peak_picker = PeakPicker()
        peak_picker.params.alpha = 9.0  # Alpha norm
        peak_picker.params.delta = self.delta_threshold  # Adaptive thresholding delta
        peak_picker.params.QuadThresh_a = (100 - self.threshold) / 1000.0
        peak_picker.params.QuadThresh_b = 0.0
        peak_picker.params.QuadThresh_c = (100 - self.threshold) / 1500.0
        peak_picker.params.rawSensitivity = 20
        peak_picker.params.aCoeffs = self.aCoeffs
        peak_picker.params.bCoeffs = self.bCoeffs
        peak_picker.params.preWin = self.medianWin
        peak_picker.params.postWin = self.medianWin + 1
        peak_picker.params.LP_on = self.LPfilter_on
        peak_picker.params.Medfilt_on = self.medfilter_on
        peak_picker.params.Polyfit_on = self.polyfitting_on
        peak_picker.params.isMedianPositive = False

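        # With the default self.threshold of 50, the quadratic thresholds above evaluate to
        # QuadThresh_a = (100 - 50) / 1000.0 = 0.05 and QuadThresh_c = (100 - 50) / 1500.0 ~= 0.033;
        # raising self.threshold lowers both values.
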
        # Settings used for feature extraction
        feature_window_frame = int(self.SampleRate / self.gammatoneLen * self.featureWindow)
        feature_step_frame = int(0.5 * self.SampleRate / self.gammatoneLen * self.featureStep)
        aggregation_window, aggregation_step = 100, 50
        featureRate = float(self.SampleRate) / self.stepSize

        audio_files = [x for x in os.listdir(options.GT) if not x.startswith(".")]
        # audio_files = audio_files[:2]
        audio_files.sort()
        audio_list = []

        # The directory listings are overridden below by fixed feature subsets.
        gammatone_feature_list = [i for i in os.listdir(options.GF) if not i.startswith('.')]
        gammatone_feature_list = ['contrast4', 'rolloff', 'dct']
        tempo_feature_list = [i for i in os.listdir(options.TF) if not i.startswith('.')]
        tempo_feature_list = ['intensity_bpm', 'loudness_bpm']
        timbre_feature_list = ['mfcc']
        harmonic_feature_list = ['nnls']

        gammatone_feature_list = [join(options.GF, f) for f in gammatone_feature_list]
        timbre_feature_list = [join(options.SF, f) for f in timbre_feature_list]
        tempo_feature_list = [join(options.TF, f) for f in tempo_feature_list]
        harmonic_feature_list = [join(options.SF, f) for f in harmonic_feature_list]

        fobj_list = []

        # For each audio file, load specific features
        for audio in audio_files:
            ao = AudioObj()
            ao.name = splitext(audio)[0]
            print ao.name
            # annotation_file = join(options.GT, ao.name+'.txt')  # iso, salami
            # ao.gt = np.genfromtxt(annotation_file, usecols=0)
            # ao.label = np.genfromtxt(annotation_file, usecols=1, dtype=str)
            annotation_file = join(options.GT, ao.name+'.csv')  # qupujicheng
            ao.gt = np.genfromtxt(annotation_file, usecols=0, delimiter=',')
            ao.label = np.genfromtxt(annotation_file, usecols=1, delimiter=',', dtype=str)

            gammatone_featureset, timbre_featureset, tempo_featureset, harmonic_featureset = [], [], [], []
            for feature in gammatone_feature_list:
                for f in os.listdir(feature):
                    if f[:f.find('_vamp')] == ao.name:
                        gammatone_featureset.append(np.genfromtxt(join(feature, f), delimiter=',', filling_values=0.0)[:, 1:])
                        break
            if len(gammatone_feature_list) > 1:
                n_frame = np.min([x.shape[0] for x in gammatone_featureset])
                gammatone_featureset = [x[:n_frame, :] for x in gammatone_featureset]
                ao.gammatone_features = np.hstack(gammatone_featureset)
            else:
                ao.gammatone_features = gammatone_featureset[0]

            for feature in timbre_feature_list:
                for f in os.listdir(feature):
                    if f[:f.find('_vamp')] == ao.name:
                        timbre_featureset.append(np.genfromtxt(join(feature, f), delimiter=',', filling_values=0.0)[:, 1:])
                        break
            if len(timbre_feature_list) > 1:
                n_frame = np.min([x.shape[0] for x in timbre_featureset])
                timbre_featureset = [x[:n_frame, :] for x in timbre_featureset]
                ao.timbre_features = np.hstack(timbre_featureset)
            else:
                ao.timbre_features = timbre_featureset[0]

            for feature in tempo_feature_list:
                for f in os.listdir(feature):
                    if f[:f.find('_vamp')] == ao.name:
                        tempo_featureset.append(np.genfromtxt(join(feature, f), delimiter=',', filling_values=0.0)[1:, 1:])
                        ao.tempo_timestamps = np.genfromtxt(join(feature, f), delimiter=',', filling_values=0.0)[1:, 0]
                        break
            if len(tempo_feature_list) > 1:
                n_frame = np.min([x.shape[0] for x in tempo_featureset])
                tempo_featureset = [x[:n_frame, :] for x in tempo_featureset]
                ao.tempo_features = np.hstack(tempo_featureset)
            else:
                ao.tempo_features = tempo_featureset[0]

            for feature in harmonic_feature_list:
                for f in os.listdir(feature):
                    if f[:f.find('_vamp')] == ao.name:
                        harmonic_featureset.append(np.genfromtxt(join(feature, f), delimiter=',', filling_values=0.0)[:, 1:])
                        break
            if len(harmonic_feature_list) > 1:
                n_frame = np.min([x.shape[0] for x in harmonic_featureset])
                harmonic_featureset = [x[:n_frame, :] for x in harmonic_featureset]
                ao.harmonic_features = np.hstack(harmonic_featureset)
            else:
                ao.harmonic_features = harmonic_featureset[0]

            # Get aggregated features for computing the SSM
            aggregation_window, aggregation_step = 1, 1
            featureRate = float(self.SampleRate) / self.stepSize
            pca = PCA(n_components=5)

            # Resample and normalise features. All feature streams are resampled to the
            # tempo feature length (n_target) so that they share a common frame count and
            # can be indexed by the tempo timestamps below.
            n_target = ao.tempo_features.shape[0]
            ao.gammatone_features = resample(ao.gammatone_features, n_target)
            ao.gammatone_features = normaliseFeature(ao.gammatone_features)
            ao.timbre_features = resample(ao.timbre_features, n_target)
            ao.timbre_features = normaliseFeature(ao.timbre_features)
            ao.harmonic_features = resample(ao.harmonic_features, n_target)
            ao.harmonic_features = normaliseFeature(ao.harmonic_features)
            ao.tempo_features = normaliseFeature(ao.tempo_features)

            pca.fit(ao.gammatone_features)
            ao.gammatone_features = pca.transform(ao.gammatone_features)
            ao.gammatone_ssm = getSSM(ao.gammatone_features)

            pca.fit(ao.tempo_features)
            ao.tempo_features = pca.transform(ao.tempo_features)
            ao.tempo_ssm = getSSM(ao.tempo_features)

            pca.fit(ao.timbre_features)
            ao.timbre_features = pca.transform(ao.timbre_features)
            ao.timbre_ssm = getSSM(ao.timbre_features)

            pca.fit(ao.harmonic_features)
            ao.harmonic_features = pca.transform(ao.harmonic_features)
            ao.harmonic_ssm = getSSM(ao.harmonic_features)

            ao.ssm_timestamps = np.array(map(lambda x: ao.tempo_timestamps[aggregation_step * x], np.arange(0, ao.gammatone_ssm.shape[0])))

            audio_list.append(ao)

        # Segment input audio using the specified boundary retrieval method.
        print 'Segmenting using %s method' % options.BOUNDARY
        for i, ao in enumerate(audio_list):
            print 'processing: %s' % ao.name

            # Experiment 1: segmentation using individual features.
            if options.BOUNDARY == 'novelty':
                # Peak picking from the novelty curve
                gammatone_novelty, smoothed_gammatone_novelty, gammatone_bound_idxs = novelty_S.process(ao.gammatone_ssm, self.kernel_size, peak_picker)
                timbre_novelty, smoothed_timbre_novelty, timbre_bound_idxs = novelty_S.process(ao.timbre_ssm, self.kernel_size, peak_picker)
                tempo_novelty, smoothed_tempo_novelty, tempo_bound_idxs = novelty_S.process(ao.tempo_ssm, self.kernel_size, peak_picker)
                harmonic_novelty, smoothed_harmonic_novelty, harmonic_bound_idxs = novelty_S.process(ao.harmonic_ssm, self.kernel_size, peak_picker)

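            # A sketch of the idea behind the novelty branch above (not necessarily the exact
            # novelty_S implementation): a checkerboard-style kernel of size self.kernel_size
            # is correlated along the main diagonal of each SSM to obtain a novelty curve,
            # which is smoothed and passed to peak_picker; the picked peak frame indices are
            # returned as the *_bound_idxs.
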
            if options.BOUNDARY == 'cnmf':
                gammatone_bound_idxs = cnmf_S.segmentation(ao.gammatone_features, rank=rank, R=R, h=h, niter=300)
                timbre_bound_idxs = cnmf_S.segmentation(ao.timbre_features, rank=rank, R=R, h=h, niter=300)
                tempo_bound_idxs = cnmf_S.segmentation(ao.tempo_features, rank=rank, R=R, h=h, niter=300)
                harmonic_bound_idxs = cnmf_S.segmentation(ao.harmonic_features, rank=rank, R=R, h=h, niter=300)

            if options.BOUNDARY == 'foote':
                gammatone_bound_idxs = foote_S.segmentation(ao.gammatone_features, M=M, Mg=Mg, L=L)
                timbre_bound_idxs = foote_S.segmentation(ao.timbre_features, M=M, Mg=Mg, L=L)
                tempo_bound_idxs = foote_S.segmentation(ao.tempo_features, M=M, Mg=Mg, L=L)
                harmonic_bound_idxs = foote_S.segmentation(ao.harmonic_features, M=M, Mg=Mg, L=L)

            if options.BOUNDARY == 'sf':
                gammatone_bound_idxs = sf_S.segmentation(ao.gammatone_features)
                timbre_bound_idxs = sf_S.segmentation(ao.timbre_features)
                tempo_bound_idxs = sf_S.segmentation(ao.tempo_features)
                harmonic_bound_idxs = sf_S.segmentation(ao.harmonic_features)

            if options.LABEL == 'fmc2d':
                gammatone_bound_labels = fmc2d_S.compute_similarity(gammatone_bound_idxs, xmeans=True, N=N)
                timbre_bound_labels = fmc2d_S.compute_similarity(timbre_bound_idxs, xmeans=True, N=N)
                tempo_bound_labels = fmc2d_S.compute_similarity(tempo_bound_idxs, xmeans=True, N=N)
                harmonic_bound_labels = fmc2d_S.compute_similarity(harmonic_bound_idxs, xmeans=True, N=N)

            if options.LABEL == 'cnmf':
                gammatone_bound_labels = cnmf_S.compute_labels(gammatone_bound_idxs, est_bound_idxs, nFrames)
                timbre_bound_labels = cnmf_S.compute_labels(timbre_bound_idxs, est_bound_idxs, nFrames)
                tempo_bound_labels = cnmf_S.compute_labels(tempo_bound_idxs, est_bound_idxs, nFrames)
                harmonic_bound_labels = cnmf_S.compute_labels(harmonic_bound_idxs, est_bound_idxs, nFrames)

            gammatone_detection = [0.0] + [ao.ssm_timestamps[int(np.rint(i))] for i in gammatone_bound_idxs]
            timbre_detection = [0.0] + [ao.ssm_timestamps[int(np.rint(i))] for i in timbre_bound_idxs]
            harmonic_detection = [0.0] + [ao.ssm_timestamps[int(np.rint(i))] for i in harmonic_bound_idxs]
            tempo_detection = [0.0] + [ao.ssm_timestamps[int(np.rint(i))] for i in tempo_bound_idxs]

            # Experiment 2: trying combined features using the best boundary retrieval method.
            ao_featureset = [ao.gammatone_features, ao.harmonic_features, ao.timbre_features, ao.tempo_features]
            feature_sel = [int(x) for x in options.FEATURES if x.isdigit()]
            ao_featureset = [ao_featureset[i] for i in feature_sel]


def main():

    segmenter = SSMseg()
    segmenter.process()


if __name__ == '__main__':
    main()