/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
 * SimilarityPlugin.cpp
 *
 * Copyright 2009 Centre for Digital Music, Queen Mary, University of London.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version. See the file
    COPYING included with this distribution for more information.
*/

#include <iostream>
#include <cstdio>
#include <algorithm>

#include "SimilarityPlugin.h"
#include "base/Pitch.h"
#include "dsp/mfcc/MFCC.h"
#include "dsp/chromagram/Chromagram.h"
#include "dsp/rateconversion/Decimator.h"
#include "dsp/rhythm/BeatSpectrum.h"
#include "maths/KLDivergence.h"
#include "maths/CosineDistance.h"
#include "maths/MathUtilities.h"

using std::string;
using std::vector;
using std::cerr;
using std::endl;
using std::ostringstream;

const float
SimilarityPlugin::m_noRhythm = 0.009;

const float
SimilarityPlugin::m_allRhythm = 0.991;

SimilarityPlugin::SimilarityPlugin(float inputSampleRate) :
    Plugin(inputSampleRate),
    m_type(TypeMFCC),
    m_mfcc(0),
    m_rhythmfcc(0),
    m_chromagram(0),
    m_decimator(0),
    m_featureColumnSize(20),
    m_rhythmWeighting(0.5f),
    m_rhythmClipDuration(4.f), // seconds
    m_rhythmClipOrigin(40.f), // seconds
    m_rhythmClipFrameSize(0),
    m_rhythmClipFrames(0),
    m_rhythmColumnSize(20),
    m_blockSize(0),
    m_channels(0),
    m_processRate(0),
    m_frameNo(0),
    m_done(false)
{
    int rate = lrintf(m_inputSampleRate);
    int internalRate = 22050;
    int decimationFactor = rate / internalRate;
    if (decimationFactor < 1) decimationFactor = 1;

    // must be a power of two
    while (decimationFactor & (decimationFactor - 1)) ++decimationFactor;

    m_processRate = rate / decimationFactor; // may be 22050, 24000 etc
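    // Worked examples of the arithmetic above:
    //   44100 Hz input -> factor 2 -> process rate 22050 Hz
    //   48000 Hz input -> factor 2 -> process rate 24000 Hz
    //   96000 Hz input -> factor 4 -> process rate 24000 Hz
    //   22050 Hz or lower -> factor 1 -> no decimation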
}

SimilarityPlugin::~SimilarityPlugin()
{
    delete m_mfcc;
    delete m_rhythmfcc;
    delete m_chromagram;
    delete m_decimator;
}

string
SimilarityPlugin::getIdentifier() const
{
    return "qm-similarity";
}

string
SimilarityPlugin::getName() const
{
    return "Similarity";
}

string
SimilarityPlugin::getDescription() const
{
    return "Return a distance matrix for similarity between the input audio channels";
}

string
SimilarityPlugin::getMaker() const
{
    return "Queen Mary, University of London";
}

int
SimilarityPlugin::getPluginVersion() const
{
    return 1;
}

string
SimilarityPlugin::getCopyright() const
{
    return "Plugin by Mark Levy, Kurt Jacobson and Chris Cannam. Copyright (c) 2009 QMUL - All Rights Reserved";
}

size_t
SimilarityPlugin::getMinChannelCount() const
{
    return 1;
}

size_t
SimilarityPlugin::getMaxChannelCount() const
{
    return 1024;
}

int
SimilarityPlugin::getDecimationFactor() const
{
    int rate = lrintf(m_inputSampleRate);
    return rate / m_processRate;
}

size_t
SimilarityPlugin::getPreferredStepSize() const
{
    if (m_blockSize == 0) calculateBlockSize();

    // there is also an assumption to this effect in process()
    // (referring to m_fftSize/2 instead of a literal post-decimation
    // step size):
    return m_blockSize/2;
}

size_t
SimilarityPlugin::getPreferredBlockSize() const
{
    if (m_blockSize == 0) calculateBlockSize();
    return m_blockSize;
}

void
SimilarityPlugin::calculateBlockSize() const
{
    if (m_blockSize != 0) return;
    int decimationFactor = getDecimationFactor();
    m_blockSize = 2048 * decimationFactor;
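    // e.g. for 44100 Hz input the decimation factor is 2, so the
    // preferred block size is 4096 input samples, which decimate to a
    // single 2048-sample frame at the 22050 Hz process rate (and the
    // preferred step size above is half that, 2048 input samples).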
}

SimilarityPlugin::ParameterList SimilarityPlugin::getParameterDescriptors() const
{
    ParameterList list;

    ParameterDescriptor desc;
    desc.identifier = "featureType";
    desc.name = "Feature Type";
    desc.description = "Audio feature used for similarity measure. Timbral: use the first 20 MFCCs (19 plus C0). Chromatic: use 12 bin-per-octave chroma. Rhythmic: compare beat spectra of short regions.";
    desc.unit = "";
    desc.minValue = 0;
    desc.maxValue = 4;
    desc.defaultValue = 1;
    desc.isQuantized = true;
    desc.quantizeStep = 1;
    desc.valueNames.push_back("Timbre");
    desc.valueNames.push_back("Timbre and Rhythm");
    desc.valueNames.push_back("Chroma");
    desc.valueNames.push_back("Chroma and Rhythm");
    desc.valueNames.push_back("Rhythm only");
    list.push_back(desc);
/*
    desc.identifier = "rhythmWeighting";
    desc.name = "Influence of Rhythm";
    desc.description = "Proportion of similarity measure made up from rhythmic similarity component, from 0 (entirely timbral or chromatic) to 100 (entirely rhythmic).";
    desc.unit = "%";
    desc.minValue = 0;
    desc.maxValue = 100;
    desc.defaultValue = 0;
    desc.isQuantized = false;
    desc.valueNames.clear();
    list.push_back(desc);
*/
    return list;
}

float
SimilarityPlugin::getParameter(std::string param) const
{
    if (param == "featureType") {

        if (m_rhythmWeighting > m_allRhythm) {
            return 4;
        }

        switch (m_type) {

        case TypeMFCC:
            if (m_rhythmWeighting < m_noRhythm) return 0;
            else return 1;
            break;

        case TypeChroma:
            if (m_rhythmWeighting < m_noRhythm) return 2;
            else return 3;
            break;
        }

        return 1;

//    } else if (param == "rhythmWeighting") {
//        return nearbyint(m_rhythmWeighting * 100.0);
    }

    std::cerr << "WARNING: SimilarityPlugin::getParameter: unknown parameter \""
              << param << "\"" << std::endl;
    return 0.0;
}

void
SimilarityPlugin::setParameter(std::string param, float value)
{
    if (param == "featureType") {

        int v = int(value + 0.1);

        Type newType = m_type;

        switch (v) {
        case 0: newType = TypeMFCC; m_rhythmWeighting = 0.0f; break;
        case 1: newType = TypeMFCC; m_rhythmWeighting = 0.5f; break;
        case 2: newType = TypeChroma; m_rhythmWeighting = 0.0f; break;
        case 3: newType = TypeChroma; m_rhythmWeighting = 0.5f; break;
        case 4: newType = TypeMFCC; m_rhythmWeighting = 1.f; break;
        }

        if (newType != m_type) m_blockSize = 0;

        m_type = newType;
        return;

//    } else if (param == "rhythmWeighting") {
//        m_rhythmWeighting = value / 100;
//        return;
    }

    std::cerr << "WARNING: SimilarityPlugin::setParameter: unknown parameter \""
              << param << "\"" << std::endl;
}

SimilarityPlugin::OutputList
SimilarityPlugin::getOutputDescriptors() const
{
    OutputList list;

    OutputDescriptor similarity;
    similarity.identifier = "distancematrix";
    similarity.name = "Distance Matrix";
    similarity.description = "Distance matrix for similarity metric. Smaller = more similar. Should be symmetrical.";
    similarity.unit = "";
    similarity.hasFixedBinCount = true;
    similarity.binCount = m_channels;
    similarity.hasKnownExtents = false;
    similarity.isQuantized = false;
    similarity.sampleType = OutputDescriptor::FixedSampleRate;
    similarity.sampleRate = 1;

    m_distanceMatrixOutput = list.size();
    list.push_back(similarity);

    OutputDescriptor simvec;
    simvec.identifier = "distancevector";
    simvec.name = "Distance from First Channel";
    simvec.description = "Distance vector for similarity of each channel to the first channel. Smaller = more similar.";
    simvec.unit = "";
    simvec.hasFixedBinCount = true;
    simvec.binCount = m_channels;
    simvec.hasKnownExtents = false;
    simvec.isQuantized = false;
    simvec.sampleType = OutputDescriptor::FixedSampleRate;
    simvec.sampleRate = 1;

    m_distanceVectorOutput = list.size();
    list.push_back(simvec);

    OutputDescriptor sortvec;
    sortvec.identifier = "sorteddistancevector";
    sortvec.name = "Ordered Distances from First Channel";
    sortvec.description = "Vector of the order of other channels in similarity to the first, followed by distance vector for similarity of each to the first. Smaller = more similar.";
    sortvec.unit = "";
    sortvec.hasFixedBinCount = true;
    sortvec.binCount = m_channels;
    sortvec.hasKnownExtents = false;
    sortvec.isQuantized = false;
    sortvec.sampleType = OutputDescriptor::FixedSampleRate;
    sortvec.sampleRate = 1;

    m_sortedVectorOutput = list.size();
    list.push_back(sortvec);

    OutputDescriptor means;
    means.identifier = "means";
    means.name = "Feature Means";
    means.description = "Means of the feature bins. Feature time (sec) corresponds to input channel. Number of bins depends on selected feature type.";
    means.unit = "";
    means.hasFixedBinCount = true;
    means.binCount = m_featureColumnSize;
    means.hasKnownExtents = false;
    means.isQuantized = false;
    means.sampleType = OutputDescriptor::FixedSampleRate;
    means.sampleRate = 1;

    m_meansOutput = list.size();
    list.push_back(means);

    OutputDescriptor variances;
    variances.identifier = "variances";
    variances.name = "Feature Variances";
    variances.description = "Variances of the feature bins. Feature time (sec) corresponds to input channel. Number of bins depends on selected feature type.";
    variances.unit = "";
    variances.hasFixedBinCount = true;
    variances.binCount = m_featureColumnSize;
    variances.hasKnownExtents = false;
    variances.isQuantized = false;
    variances.sampleType = OutputDescriptor::FixedSampleRate;
    variances.sampleRate = 1;

    m_variancesOutput = list.size();
    list.push_back(variances);

    OutputDescriptor beatspectrum;
    beatspectrum.identifier = "beatspectrum";
    beatspectrum.name = "Beat Spectra";
    beatspectrum.description = "Rhythmic self-similarity vectors (beat spectra) for the input channels. Feature time (sec) corresponds to input channel. Not returned if rhythm weighting is zero.";
    beatspectrum.unit = "";
    if (m_rhythmClipFrames > 0) {
        beatspectrum.hasFixedBinCount = true;
        beatspectrum.binCount = m_rhythmClipFrames / 2;
    } else {
        beatspectrum.hasFixedBinCount = false;
    }
    beatspectrum.hasKnownExtents = false;
    beatspectrum.isQuantized = false;
    beatspectrum.sampleType = OutputDescriptor::FixedSampleRate;
    beatspectrum.sampleRate = 1;

    m_beatSpectraOutput = list.size();
    list.push_back(beatspectrum);

    return list;
}

bool
SimilarityPlugin::initialise(size_t channels, size_t stepSize, size_t blockSize)
{
    if (channels < getMinChannelCount()) return false;

    // Using more than getMaxChannelCount is not actually a problem
    // for us. Using "incorrect" step and block sizes would be fine
    // for timbral or chroma similarity, but will break rhythmic
    // similarity, so we'd better enforce these.

    if (stepSize != getPreferredStepSize()) {
        std::cerr << "SimilarityPlugin::initialise: supplied step size "
                  << stepSize << " differs from required step size "
                  << getPreferredStepSize() << std::endl;
        return false;
    }

    if (blockSize != getPreferredBlockSize()) {
        std::cerr << "SimilarityPlugin::initialise: supplied block size "
                  << blockSize << " differs from required block size "
                  << getPreferredBlockSize() << std::endl;
        return false;
    }

    m_blockSize = blockSize;
    m_channels = channels;

    m_lastNonEmptyFrame = std::vector<int>(m_channels);
    for (int i = 0; i < m_channels; ++i) m_lastNonEmptyFrame[i] = -1;

    m_emptyFrameCount = std::vector<int>(m_channels);
    for (int i = 0; i < m_channels; ++i) m_emptyFrameCount[i] = 0;

    m_frameNo = 0;

    int decimationFactor = getDecimationFactor();
    if (decimationFactor > 1) {
        m_decimator = new Decimator(m_blockSize, decimationFactor);
    }

    if (m_type == TypeMFCC) {

        m_featureColumnSize = 20;

        MFCCConfig config(m_processRate);
        config.fftsize = 2048;
        config.nceps = m_featureColumnSize - 1;
        config.want_c0 = true;
        config.logpower = 1;
        m_mfcc = new MFCC(config);
        m_fftSize = m_mfcc->getfftlength();
        m_rhythmClipFrameSize = m_fftSize / 4;

        // std::cerr << "MFCC FS = " << config.FS << ", FFT size = " << m_fftSize << std::endl;

    } else if (m_type == TypeChroma) {

        m_featureColumnSize = 12;

        // For simplicity, aim to have the chroma fft size equal to
        // 2048, the same as the mfcc fft size (so the input block
        // size does not depend on the feature type and we can use the
        // same processing parameters for rhythm etc). This is also
        // why getPreferredBlockSize can confidently return 2048 * the
        // decimation factor.

        // The fft size for a chromagram is the filterbank Q value
        // times the sample rate, divided by the minimum frequency,
        // rounded up to the nearest power of two.

        double q = 1.0 / (pow(2.0, (1.0 / 12.0)) - 1.0);
        double fmin = (q * m_processRate) / 2048.0;

        // Round fmin up to the nearest MIDI pitch multiple of 12.
        // So long as fmin is greater than 12 to start with, this
        // should not change the resulting fft size.

        int pmin = Pitch::getPitchForFrequency(float(fmin));
        pmin = ((pmin / 12) + 1) * 12;
        fmin = Pitch::getFrequencyForPitch(pmin);

        float fmax = Pitch::getFrequencyForPitch(pmin + 36);
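        // Worked through at a 22050 Hz process rate (approximate
        // figures): q is about 16.8, so the initial fmin is about
        // 181 Hz; rounding up to the next C gives MIDI pitch 60
        // (middle C, ~262 Hz) for fmin and pitch 96 (~2093 Hz, three
        // octaves higher) for fmax, and q * 22050 / 262 rounds up to
        // a 2048-point constant-Q frame as intended.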

        ChromaConfig config;
        config.FS = m_processRate;
        config.min = fmin;
        config.max = fmax;
        config.BPO = 12;
        config.CQThresh = 0.0054;
        // We don't normalise the chromagram's columns individually;
        // we normalise the mean at the end instead
        config.normalise = MathUtilities::NormaliseNone;
        m_chromagram = new Chromagram(config);
        m_fftSize = m_chromagram->getFrameSize();

        if (m_fftSize != 2048) {
            std::cerr << "WARNING: SimilarityPlugin::initialise: Internal processing FFT size " << m_fftSize << " != expected size 2048 in chroma mode" << std::endl;
        }

        // std::cerr << "fftsize = " << m_fftSize << std::endl;

        m_rhythmClipFrameSize = m_fftSize / 4;

        // std::cerr << "m_rhythmClipFrameSize = " << m_rhythmClipFrameSize << std::endl;
        // std::cerr << "min = " << config.min << ", max = " << config.max << std::endl;

    } else {

        std::cerr << "SimilarityPlugin::initialise: internal error: unknown type " << m_type << std::endl;
        return false;
    }

    if (needRhythm()) {
        m_rhythmClipFrames =
            int(ceil((m_rhythmClipDuration * m_processRate)
                     / m_rhythmClipFrameSize));
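        // e.g. for the 4-second clip at a 22050 Hz process rate with
        // 512-sample rhythm frames (a quarter of the 2048-point FFT),
        // this is ceil(88200 / 512) = 173 frames.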
        // std::cerr << "SimilarityPlugin::initialise: rhythm clip requires "
        //           << m_rhythmClipFrames << " frames of size "
        //           << m_rhythmClipFrameSize << " at process rate "
        //           << m_processRate << " ( = "
        //           << (float(m_rhythmClipFrames * m_rhythmClipFrameSize) / m_processRate) << " sec )"
        //           << std::endl;

        MFCCConfig config(m_processRate);
        config.fftsize = m_rhythmClipFrameSize;
        config.nceps = m_rhythmColumnSize - 1;
        config.want_c0 = true;
        config.logpower = 1;
        config.window = RectangularWindow; // because no overlap
        m_rhythmfcc = new MFCC(config);
    }

    for (int i = 0; i < m_channels; ++i) {

        m_values.push_back(FeatureMatrix());

        if (needRhythm()) {
            m_rhythmValues.push_back(FeatureColumnQueue());
        }
    }

    m_done = false;

    return true;
}

void
SimilarityPlugin::reset()
{
    for (int i = 0; i < int(m_values.size()); ++i) {
        m_values[i].clear();
    }

    for (int i = 0; i < int(m_rhythmValues.size()); ++i) {
        m_rhythmValues[i].clear();
    }

    for (int i = 0; i < int(m_lastNonEmptyFrame.size()); ++i) {
        m_lastNonEmptyFrame[i] = -1;
    }

    for (int i = 0; i < int(m_emptyFrameCount.size()); ++i) {
        m_emptyFrameCount[i] = 0;
    }

    m_done = false;
}

SimilarityPlugin::FeatureSet
SimilarityPlugin::process(const float *const *inputBuffers, Vamp::RealTime /* timestamp */)
{
    if (m_done) {
        return FeatureSet();
    }

    double *dblbuf = new double[m_blockSize];
    double *decbuf = dblbuf;
    if (m_decimator) decbuf = new double[m_fftSize];

    double *raw = new double[std::max(m_featureColumnSize,
                                      m_rhythmColumnSize)];

    float threshold = 1e-10;

    bool someRhythmFrameNeeded = false;

    for (int c = 0; c < m_channels; ++c) {

        bool empty = true;

        for (int i = 0; i < m_blockSize; ++i) {
            float val = inputBuffers[c][i];
            if (fabs(val) > threshold) empty = false;
            dblbuf[i] = val;
        }

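        // A frame that is essentially silent contributes nothing to
        // the timbral statistics, but for rhythm we still pad the
        // clip with zero columns (until it is full) so that the
        // beat-spectrum frames keep their timing; we also count the
        // empty frames so calculateTimbral can discount them later.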
        if (empty) {
            if (needRhythm() && ((m_frameNo % 2) == 0)) {
                for (int i = 0; i < m_fftSize / m_rhythmClipFrameSize; ++i) {
                    if (int(m_rhythmValues[c].size()) < m_rhythmClipFrames) {
                        FeatureColumn mf(m_rhythmColumnSize);
                        for (int i = 0; i < m_rhythmColumnSize; ++i) {
                            mf[i] = 0.0;
                        }
                        m_rhythmValues[c].push_back(mf);
                    }
                }
            }
            m_emptyFrameCount[c]++;
            continue;
        }

        m_lastNonEmptyFrame[c] = m_frameNo;

        if (m_decimator) {
            m_decimator->process(dblbuf, decbuf);
        }

        if (needTimbre()) {

            FeatureColumn mf(m_featureColumnSize);

            if (m_type == TypeMFCC) {
                m_mfcc->process(decbuf, raw);
                for (int i = 0; i < m_featureColumnSize; ++i) {
                    mf[i] = raw[i];
                }
            } else if (m_type == TypeChroma) {
                double *chroma = m_chromagram->process(decbuf);
                for (int i = 0; i < m_featureColumnSize; ++i) {
                    mf[i] = chroma[i];
                }
            }

            m_values[c].push_back(mf);
        }

        // std::cerr << "needRhythm = " << needRhythm() << ", frame = " << m_frameNo << std::endl;

        if (needRhythm() && ((m_frameNo % 2) == 0)) {

            // The incoming frames are overlapping; we only use every
            // other one, because we don't want the overlap (it would
            // screw up the rhythm)

            int frameOffset = 0;

            while (frameOffset + m_rhythmClipFrameSize <= m_fftSize) {

                bool needRhythmFrame = true;

                if (int(m_rhythmValues[c].size()) >= m_rhythmClipFrames) {

                    needRhythmFrame = false;

                    // assumes hopsize = framesize/2
                    float current = m_frameNo * (m_fftSize/2) + frameOffset;
                    current = current / m_processRate;
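                    // Keep sliding the clip forward (dropping the
                    // oldest sub-frame) until its end has reached
                    // m_rhythmClipOrigin + m_rhythmClipDuration,
                    // i.e. 44 seconds in; the retained clip is then
                    // the 4 seconds starting at 40s, or simply the
                    // last 4 seconds of a shorter input.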
                    if (current - m_rhythmClipDuration < m_rhythmClipOrigin) {
                        needRhythmFrame = true;
                        m_rhythmValues[c].pop_front();
                    }

                    // if (needRhythmFrame) {
                    //     std::cerr << "at current = " << current << " (frame = " << m_frameNo << "), have " << m_rhythmValues[c].size() << ", need rhythm = " << needRhythmFrame << std::endl;
                    // }

                }

                if (needRhythmFrame) {

                    someRhythmFrameNeeded = true;

                    m_rhythmfcc->process(decbuf + frameOffset, raw);

                    FeatureColumn mf(m_rhythmColumnSize);
                    for (int i = 0; i < m_rhythmColumnSize; ++i) {
                        mf[i] = raw[i];
                    }

                    m_rhythmValues[c].push_back(mf);
                }

                frameOffset += m_rhythmClipFrameSize;
            }
        }
    }

    if (!needTimbre() && !someRhythmFrameNeeded && ((m_frameNo % 2) == 0)) {
        // std::cerr << "done!" << std::endl;
        m_done = true;
    }

    if (m_decimator) delete[] decbuf;
    delete[] dblbuf;
    delete[] raw;

    ++m_frameNo;

    return FeatureSet();
}

SimilarityPlugin::FeatureMatrix
SimilarityPlugin::calculateTimbral(FeatureSet &returnFeatures)
{
    FeatureMatrix m(m_channels); // means
    FeatureMatrix v(m_channels); // variances

    for (int i = 0; i < m_channels; ++i) {

        FeatureColumn mean(m_featureColumnSize), variance(m_featureColumnSize);

        for (int j = 0; j < m_featureColumnSize; ++j) {

            mean[j] = 0.0;
            variance[j] = 0.0;
            int count;

            // We want to take values up to, but not including, the
            // last non-empty frame (which may be partial)

            int sz = m_lastNonEmptyFrame[i] - m_emptyFrameCount[i];
            if (sz < 0) sz = 0;
            if (sz >= int(m_values[i].size())) {
                sz = int(m_values[i].size())-1;
            }

            count = 0;
            for (int k = 0; k < sz; ++k) {
                double val = m_values[i][k][j];
                if (ISNAN(val) || ISINF(val)) continue;
                mean[j] += val;
                ++count;
            }
            if (count > 0) mean[j] /= count;

            count = 0;
            for (int k = 0; k < sz; ++k) {
                double val = ((m_values[i][k][j] - mean[j]) *
                              (m_values[i][k][j] - mean[j]));
                if (ISNAN(val) || ISINF(val)) continue;
                variance[j] += val;
                ++count;
            }
            if (count > 0) variance[j] /= count;
        }

        m[i] = mean;
        v[i] = variance;
    }

    FeatureMatrix distances(m_channels);

    if (m_type == TypeMFCC) {

        // "Despite the fact that MFCCs extracted from music are
        // clearly not Gaussian, [14] showed, somewhat surprisingly,
        // that a similarity function comparing single Gaussians
        // modelling MFCCs for each track can perform as well as
        // mixture models. A great advantage of using single
        // Gaussians is that a simple closed form exists for the KL
        // divergence." -- Mark Levy, "Lightweight measures for
        // timbral similarity of musical audio"
        // (http://www.elec.qmul.ac.uk/easaier/papers/mlevytimbralsimilarity.pdf)

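        // For diagonal Gaussians the symmetrised divergence referred
        // to above reduces, up to the library's scaling convention,
        // to a sum over bins of
        //   (v1 + (m1 - m2)^2) / v2 + (v2 + (m1 - m2)^2) / v1 - 2
        // which is what makes the pairwise comparison cheap.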
        KLDivergence kld;

        for (int i = 0; i < m_channels; ++i) {
            for (int j = 0; j < m_channels; ++j) {
                double d = kld.distanceGaussian(m[i], v[i], m[j], v[j]);
                distances[i].push_back(d);
            }
        }

    } else {

        // We use the KL divergence for distributions of discrete
        // variables, as chroma are histograms already. Or at least,
        // they will be when we've normalised them like this:
        for (int i = 0; i < m_channels; ++i) {
            MathUtilities::normalise(m[i], MathUtilities::NormaliseUnitSum);
        }

        KLDivergence kld;

        for (int i = 0; i < m_channels; ++i) {
            for (int j = 0; j < m_channels; ++j) {
                double d = kld.distanceDistribution(m[i], m[j], true);
                distances[i].push_back(d);
            }
        }
    }

    Feature feature;
    feature.hasTimestamp = true;

    char labelBuffer[100];

    for (int i = 0; i < m_channels; ++i) {

        feature.timestamp = Vamp::RealTime(i, 0);

        sprintf(labelBuffer, "Means for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int k = 0; k < m_featureColumnSize; ++k) {
            feature.values.push_back(m[i][k]);
        }

        returnFeatures[m_meansOutput].push_back(feature);

        sprintf(labelBuffer, "Variances for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int k = 0; k < m_featureColumnSize; ++k) {
            feature.values.push_back(v[i][k]);
        }

        returnFeatures[m_variancesOutput].push_back(feature);
    }

    return distances;
}

SimilarityPlugin::FeatureMatrix
SimilarityPlugin::calculateRhythmic(FeatureSet &returnFeatures)
{
    if (!needRhythm()) return FeatureMatrix();

    // std::cerr << "SimilarityPlugin::initialise: rhythm clip for channel 0 contains "
    //           << m_rhythmValues[0].size() << " frames of size "
    //           << m_rhythmClipFrameSize << " at process rate "
    //           << m_processRate << " ( = "
    //           << (float(m_rhythmValues[0].size() * m_rhythmClipFrameSize) / m_processRate) << " sec )"
    //           << std::endl;

    BeatSpectrum bscalc;
    CosineDistance cd;

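    // Each channel's sequence of short rhythm MFCC frames is reduced
    // by BeatSpectrum to a single rhythmic self-similarity vector
    // (its beat spectrum, as advertised by the "beatspectrum"
    // output); channels are then compared by the cosine distance
    // between those vectors.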
    // Our rhythm feature matrix is a deque of vectors for practical
    // reasons, but BeatSpectrum::process wants a vector of vectors
    // (which is what FeatureMatrix happens to be).

    FeatureMatrixSet bsinput(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        for (int j = 0; j < int(m_rhythmValues[i].size()); ++j) {
            bsinput[i].push_back(m_rhythmValues[i][j]);
        }
    }

    FeatureMatrix bs(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        bs[i] = bscalc.process(bsinput[i]);
    }

    FeatureMatrix distances(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        for (int j = 0; j < m_channels; ++j) {
            double d = cd.distance(bs[i], bs[j]);
            distances[i].push_back(d);
        }
    }

    Feature feature;
    feature.hasTimestamp = true;

    char labelBuffer[100];

    for (int i = 0; i < m_channels; ++i) {

        feature.timestamp = Vamp::RealTime(i, 0);

        sprintf(labelBuffer, "Beat spectrum for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int j = 0; j < int(bs[i].size()); ++j) {
            feature.values.push_back(bs[i][j]);
        }

        returnFeatures[m_beatSpectraOutput].push_back(feature);
    }

    return distances;
}

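// The combined measure is simply the product of whichever component
// distances are in use: with a "Timbre and Rhythm" or "Chroma and
// Rhythm" feature type both factors apply, otherwise the inactive
// component contributes a factor of 1.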
double
SimilarityPlugin::getDistance(const FeatureMatrix &timbral,
                              const FeatureMatrix &rhythmic,
                              int i, int j)
{
    double distance = 1.0;
    if (needTimbre()) distance *= timbral[i][j];
    if (needRhythm()) distance *= rhythmic[i][j];
    return distance;
}

SimilarityPlugin::FeatureSet
SimilarityPlugin::getRemainingFeatures()
{
    FeatureSet returnFeatures;

    // We want to return a matrix of the distances between channels,
    // but Vamp doesn't have a matrix return type so we will actually
    // return a series of vectors

    FeatureMatrix timbralDistances, rhythmicDistances;

    if (needTimbre()) {
        timbralDistances = calculateTimbral(returnFeatures);
    }

    if (needRhythm()) {
        rhythmicDistances = calculateRhythmic(returnFeatures);
    }

    // We give all features a timestamp, otherwise hosts will tend to
    // stamp them at the end of the file, which is annoying

    Feature feature;
    feature.hasTimestamp = true;

    Feature distanceVectorFeature;
    distanceVectorFeature.label = "Distance from first channel";
    distanceVectorFeature.hasTimestamp = true;
    distanceVectorFeature.timestamp = Vamp::RealTime::zeroTime;

    std::map<double, int> sorted;
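    // Keyed on distance, so iterating the map yields channels in
    // order of similarity to the first; note that two channels at
    // exactly the same distance would collapse to a single entry.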

    char labelBuffer[100];

    for (int i = 0; i < m_channels; ++i) {

        feature.timestamp = Vamp::RealTime(i, 0);

        feature.values.clear();
        for (int j = 0; j < m_channels; ++j) {
            double dist = getDistance(timbralDistances, rhythmicDistances, i, j);
            feature.values.push_back(dist);
        }

        sprintf(labelBuffer, "Distances from channel %d", i+1);
        feature.label = labelBuffer;

        returnFeatures[m_distanceMatrixOutput].push_back(feature);

        double fromFirst =
            getDistance(timbralDistances, rhythmicDistances, 0, i);

        distanceVectorFeature.values.push_back(fromFirst);
        sorted[fromFirst] = i;
    }

    returnFeatures[m_distanceVectorOutput].push_back(distanceVectorFeature);

    feature.label = "Order of channels by similarity to first channel";
    feature.values.clear();
    feature.timestamp = Vamp::RealTime(0, 0);

    for (std::map<double, int>::iterator i = sorted.begin();
         i != sorted.end(); ++i) {
        feature.values.push_back(i->second + 1);
    }

    returnFeatures[m_sortedVectorOutput].push_back(feature);

    feature.label = "Ordered distances of channels from first channel";
    feature.values.clear();
    feature.timestamp = Vamp::RealTime(1, 0);

    for (std::map<double, int>::iterator i = sorted.begin();
         i != sorted.end(); ++i) {
        feature.values.push_back(i->first);
    }

    returnFeatures[m_sortedVectorOutput].push_back(feature);

    return returnFeatures;
}