c@41
|
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
|
c@41
|
2
|
c@41
|
3 /*
|
c@41
|
4 * SegmenterPlugin.cpp
|
c@41
|
5 *
|
c@41
|
6 * Copyright 2008 Centre for Digital Music, Queen Mary, University of London.
|
c@41
|
7 * All rights reserved.
|
c@41
|
8 */
|
c@41
|
9
|
c@41
|
10 #include <iostream>
|
c@44
|
11 #include <cstdio>
|
c@41
|
12
|
c@41
|
13 #include "SimilarityPlugin.h"
|
c@42
|
14 #include "base/Pitch.h"
|
c@41
|
15 #include "dsp/mfcc/MFCC.h"
|
c@42
|
16 #include "dsp/chromagram/Chromagram.h"
|
c@41
|
17 #include "dsp/rateconversion/Decimator.h"
|
c@47
|
18 #include "dsp/rhythm/BeatSpectrum.h"
|
c@47
|
19 #include "maths/KLDivergence.h"
|
c@47
|
20 #include "maths/CosineDistance.h"
|
c@49
|
21 #include "maths/MathUtilities.h"
|
c@41
|
22
|
c@41
|
23 using std::string;
|
c@41
|
24 using std::vector;
|
c@41
|
25 using std::cerr;
|
c@41
|
26 using std::endl;
|
c@41
|
27 using std::ostringstream;
|
c@41
|
28
|
// Thresholds used to map the continuous rhythm weighting onto the
// discrete "featureType" parameter values (see getParameter /
// setParameter): a weighting below m_noRhythm means timbre/chroma
// only, above m_allRhythm means rhythm only.

const float
SimilarityPlugin::m_noRhythm = 0.009;

const float
SimilarityPlugin::m_allRhythm = 0.991;
c@47
|
34
|
c@41
|
35 SimilarityPlugin::SimilarityPlugin(float inputSampleRate) :
|
c@41
|
36 Plugin(inputSampleRate),
|
c@42
|
37 m_type(TypeMFCC),
|
c@41
|
38 m_mfcc(0),
|
c@47
|
39 m_rhythmfcc(0),
|
c@42
|
40 m_chromagram(0),
|
c@41
|
41 m_decimator(0),
|
c@42
|
42 m_featureColumnSize(20),
|
c@48
|
43 m_rhythmWeighting(0.5f),
|
c@47
|
44 m_rhythmClipDuration(4.f), // seconds
|
c@47
|
45 m_rhythmClipOrigin(40.f), // seconds
|
c@47
|
46 m_rhythmClipFrameSize(0),
|
c@47
|
47 m_rhythmClipFrames(0),
|
c@47
|
48 m_rhythmColumnSize(20),
|
c@41
|
49 m_blockSize(0),
|
c@47
|
50 m_channels(0),
|
c@47
|
51 m_processRate(0),
|
c@47
|
52 m_frameNo(0),
|
c@47
|
53 m_done(false)
|
c@41
|
54 {
|
c@47
|
55 int rate = lrintf(m_inputSampleRate);
|
c@47
|
56 int internalRate = 22050;
|
c@47
|
57 int decimationFactor = rate / internalRate;
|
c@47
|
58 if (decimationFactor < 1) decimationFactor = 1;
|
c@47
|
59
|
c@47
|
60 // must be a power of two
|
c@47
|
61 while (decimationFactor & (decimationFactor - 1)) ++decimationFactor;
|
c@47
|
62
|
c@47
|
63 m_processRate = rate / decimationFactor; // may be 22050, 24000 etc
|
c@41
|
64 }
|
c@41
|
65
|
SimilarityPlugin::~SimilarityPlugin()
{
    // All of these are owning raw pointers allocated (if at all) in
    // initialise(); deleting a null pointer is a safe no-op.
    delete m_mfcc;
    delete m_rhythmfcc;
    delete m_chromagram;
    delete m_decimator;
}
c@41
|
73
|
// --- Plugin metadata --------------------------------------------------

string
SimilarityPlugin::getIdentifier() const
{
    // Stable machine-readable identifier used by hosts to locate us
    return "qm-similarity";
}

string
SimilarityPlugin::getName() const
{
    return "Similarity";
}

string
SimilarityPlugin::getDescription() const
{
    return "Return a distance matrix for similarity between the input audio channels";
}

string
SimilarityPlugin::getMaker() const
{
    return "Queen Mary, University of London";
}

int
SimilarityPlugin::getPluginVersion() const
{
    return 1;
}

string
SimilarityPlugin::getCopyright() const
{
    return "Plugin by Mark Levy, Kurt Jacobson and Chris Cannam. Copyright (c) 2008 QMUL - All Rights Reserved";
}

size_t
SimilarityPlugin::getMinChannelCount() const
{
    // A single channel just yields a trivial 1x1 distance matrix, but
    // is still accepted
    return 1;
}

size_t
SimilarityPlugin::getMaxChannelCount() const
{
    // Each input channel is one "track" being compared; initialise()
    // comments note that exceeding this is not actually a problem
    return 1024;
}
c@41
|
121
|
c@41
|
122 bool
|
c@41
|
123 SimilarityPlugin::initialise(size_t channels, size_t stepSize, size_t blockSize)
|
c@41
|
124 {
|
c@52
|
125 if (channels < getMinChannelCount()) return false;
|
c@52
|
126
|
c@52
|
127 // Using more than getMaxChannelCount is not actually a problem
|
c@52
|
128 // for us. Using "incorrect" step and block sizes would be fine
|
c@52
|
129 // for timbral or chroma similarity, but will break rhythmic
|
c@52
|
130 // similarity, so we'd better enforce these.
|
c@41
|
131
|
c@41
|
132 if (stepSize != getPreferredStepSize()) {
|
c@41
|
133 std::cerr << "SimilarityPlugin::initialise: supplied step size "
|
c@41
|
134 << stepSize << " differs from required step size "
|
c@41
|
135 << getPreferredStepSize() << std::endl;
|
c@41
|
136 return false;
|
c@41
|
137 }
|
c@41
|
138
|
c@41
|
139 if (blockSize != getPreferredBlockSize()) {
|
c@41
|
140 std::cerr << "SimilarityPlugin::initialise: supplied block size "
|
c@41
|
141 << blockSize << " differs from required block size "
|
c@41
|
142 << getPreferredBlockSize() << std::endl;
|
c@41
|
143 return false;
|
c@41
|
144 }
|
c@41
|
145
|
c@41
|
146 m_blockSize = blockSize;
|
c@41
|
147 m_channels = channels;
|
c@41
|
148
|
c@44
|
149 m_lastNonEmptyFrame = std::vector<int>(m_channels);
|
c@44
|
150 for (int i = 0; i < m_channels; ++i) m_lastNonEmptyFrame[i] = -1;
|
c@60
|
151
|
c@60
|
152 m_emptyFrameCount = std::vector<int>(m_channels);
|
c@60
|
153 for (int i = 0; i < m_channels; ++i) m_emptyFrameCount[i] = 0;
|
c@60
|
154
|
c@44
|
155 m_frameNo = 0;
|
c@44
|
156
|
c@41
|
157 int decimationFactor = getDecimationFactor();
|
c@41
|
158 if (decimationFactor > 1) {
|
c@42
|
159 m_decimator = new Decimator(m_blockSize, decimationFactor);
|
c@41
|
160 }
|
c@41
|
161
|
c@42
|
162 if (m_type == TypeMFCC) {
|
c@42
|
163
|
c@42
|
164 m_featureColumnSize = 20;
|
c@42
|
165
|
c@47
|
166 MFCCConfig config(m_processRate);
|
c@42
|
167 config.fftsize = 2048;
|
c@42
|
168 config.nceps = m_featureColumnSize - 1;
|
c@42
|
169 config.want_c0 = true;
|
c@45
|
170 config.logpower = 1;
|
c@42
|
171 m_mfcc = new MFCC(config);
|
c@42
|
172 m_fftSize = m_mfcc->getfftlength();
|
c@47
|
173 m_rhythmClipFrameSize = m_fftSize / 4;
|
c@42
|
174
|
c@53
|
175 // std::cerr << "MFCC FS = " << config.FS << ", FFT size = " << m_fftSize<< std::endl;
|
c@43
|
176
|
c@42
|
177 } else if (m_type == TypeChroma) {
|
c@42
|
178
|
c@42
|
179 m_featureColumnSize = 12;
|
c@42
|
180
|
c@42
|
181 ChromaConfig config;
|
c@47
|
182 config.FS = m_processRate;
|
c@42
|
183 config.min = Pitch::getFrequencyForPitch(24, 0, 440);
|
c@42
|
184 config.max = Pitch::getFrequencyForPitch(96, 0, 440);
|
c@42
|
185 config.BPO = 12;
|
c@42
|
186 config.CQThresh = 0.0054;
|
c@49
|
187 // We don't normalise the chromagram's columns individually;
|
c@49
|
188 // we normalise the mean at the end instead
|
c@49
|
189 config.normalise = MathUtilities::NormaliseNone;
|
c@42
|
190 m_chromagram = new Chromagram(config);
|
c@42
|
191 m_fftSize = m_chromagram->getFrameSize();
|
c@42
|
192
|
c@53
|
193 // std::cerr << "fftsize = " << m_fftSize << std::endl;
|
c@47
|
194
|
c@47
|
195 m_rhythmClipFrameSize = m_fftSize / 16;
|
c@47
|
196 while (m_rhythmClipFrameSize < 512) m_rhythmClipFrameSize *= 2;
|
c@53
|
197 // std::cerr << "m_rhythmClipFrameSize = " << m_rhythmClipFrameSize << std::endl;
|
c@47
|
198
|
c@53
|
199 // std::cerr << "min = "<< config.min << ", max = " << config.max << std::endl;
|
c@42
|
200
|
c@42
|
201 } else {
|
c@42
|
202
|
c@42
|
203 std::cerr << "SimilarityPlugin::initialise: internal error: unknown type " << m_type << std::endl;
|
c@42
|
204 return false;
|
c@42
|
205 }
|
c@41
|
206
|
c@47
|
207 if (needRhythm()) {
|
c@47
|
208 m_rhythmClipFrames =
|
c@47
|
209 int(ceil((m_rhythmClipDuration * m_processRate)
|
c@47
|
210 / m_rhythmClipFrameSize));
|
c@60
|
211 // std::cerr << "SimilarityPlugin::initialise: rhythm clip requires "
|
c@60
|
212 // << m_rhythmClipFrames << " frames of size "
|
c@60
|
213 // << m_rhythmClipFrameSize << " at process rate "
|
c@60
|
214 // << m_processRate << " ( = "
|
c@60
|
215 // << (float(m_rhythmClipFrames * m_rhythmClipFrameSize) / m_processRate) << " sec )"
|
c@60
|
216 // << std::endl;
|
c@47
|
217
|
c@47
|
218 MFCCConfig config(m_processRate);
|
c@47
|
219 config.fftsize = m_rhythmClipFrameSize;
|
c@47
|
220 config.nceps = m_featureColumnSize - 1;
|
c@47
|
221 config.want_c0 = true;
|
c@47
|
222 config.logpower = 1;
|
c@47
|
223 config.window = RectangularWindow; // because no overlap
|
c@47
|
224 m_rhythmfcc = new MFCC(config);
|
c@47
|
225 }
|
c@47
|
226
|
c@41
|
227 for (int i = 0; i < m_channels; ++i) {
|
c@47
|
228
|
c@42
|
229 m_values.push_back(FeatureMatrix());
|
c@47
|
230
|
c@47
|
231 if (needRhythm()) {
|
c@47
|
232 m_rhythmValues.push_back(FeatureColumnQueue());
|
c@47
|
233 }
|
c@41
|
234 }
|
c@41
|
235
|
c@47
|
236 m_done = false;
|
c@47
|
237
|
c@41
|
238 return true;
|
c@41
|
239 }
|
c@41
|
240
|
c@41
|
241 void
|
c@41
|
242 SimilarityPlugin::reset()
|
c@41
|
243 {
|
c@57
|
244 for (int i = 0; i < m_values.size(); ++i) {
|
c@57
|
245 m_values[i].clear();
|
c@57
|
246 }
|
c@57
|
247
|
c@57
|
248 for (int i = 0; i < m_rhythmValues.size(); ++i) {
|
c@57
|
249 m_rhythmValues[i].clear();
|
c@57
|
250 }
|
c@57
|
251
|
c@60
|
252 for (int i = 0; i < m_lastNonEmptyFrame.size(); ++i) {
|
c@60
|
253 m_lastNonEmptyFrame[i] = -1;
|
c@60
|
254 }
|
c@60
|
255
|
c@60
|
256 for (int i = 0; i < m_emptyFrameCount.size(); ++i) {
|
c@60
|
257 m_emptyFrameCount[i] = 0;
|
c@60
|
258 }
|
c@60
|
259
|
c@47
|
260 m_done = false;
|
c@41
|
261 }
|
c@41
|
262
|
int
SimilarityPlugin::getDecimationFactor() const
{
    // Ratio of input rate to the internal processing rate chosen in
    // the constructor; the constructor guarantees this is a power of
    // two (>= 1), as the Decimator requires
    int rate = lrintf(m_inputSampleRate);
    return rate / m_processRate;
}
c@41
|
269
|
size_t
SimilarityPlugin::getPreferredStepSize() const
{
    // m_blockSize is computed lazily because it depends on the
    // currently selected feature type
    if (m_blockSize == 0) calculateBlockSize();

    // there is also an assumption to this effect in process()
    // (referring to m_fftSize/2 instead of a literal post-decimation
    // step size):
    return m_blockSize/2;
}
c@41
|
280
|
size_t
SimilarityPlugin::getPreferredBlockSize() const
{
    // Computed lazily because it depends on the selected feature type
    // (chroma frame size differs from the fixed MFCC FFT size)
    if (m_blockSize == 0) calculateBlockSize();
    return m_blockSize;
}
c@42
|
287
|
c@42
|
288 void
|
c@42
|
289 SimilarityPlugin::calculateBlockSize() const
|
c@42
|
290 {
|
c@42
|
291 if (m_blockSize != 0) return;
|
c@42
|
292 int decimationFactor = getDecimationFactor();
|
c@42
|
293 if (m_type == TypeChroma) {
|
c@42
|
294 ChromaConfig config;
|
c@47
|
295 config.FS = m_processRate;
|
c@42
|
296 config.min = Pitch::getFrequencyForPitch(24, 0, 440);
|
c@42
|
297 config.max = Pitch::getFrequencyForPitch(96, 0, 440);
|
c@42
|
298 config.BPO = 12;
|
c@42
|
299 config.CQThresh = 0.0054;
|
c@49
|
300 config.normalise = MathUtilities::NormaliseNone;
|
c@42
|
301 Chromagram *c = new Chromagram(config);
|
c@42
|
302 size_t sz = c->getFrameSize();
|
c@42
|
303 delete c;
|
c@42
|
304 m_blockSize = sz * decimationFactor;
|
c@42
|
305 } else {
|
c@42
|
306 m_blockSize = 2048 * decimationFactor;
|
c@42
|
307 }
|
c@41
|
308 }
|
c@41
|
309
|
SimilarityPlugin::ParameterList SimilarityPlugin::getParameterDescriptors() const
{
    ParameterList list;

    // A single quantized parameter selects both the base feature type
    // and whether rhythm is mixed in; see setParameter for how each
    // value maps onto m_type and m_rhythmWeighting
    ParameterDescriptor desc;
    desc.identifier = "featureType";
    desc.name = "Feature Type";
    desc.description = "Audio feature used for similarity measure. Timbral: use the first 20 MFCCs (19 plus C0). Chromatic: use 12 bin-per-octave chroma. Rhythmic: compare beat spectra of short regions.";
    desc.unit = "";
    desc.minValue = 0;
    desc.maxValue = 4;
    desc.defaultValue = 1;
    desc.isQuantized = true;
    desc.quantizeStep = 1;
    desc.valueNames.push_back("Timbre");
    desc.valueNames.push_back("Timbre and Rhythm");
    desc.valueNames.push_back("Chroma");
    desc.valueNames.push_back("Chroma and Rhythm");
    desc.valueNames.push_back("Rhythm only");
    list.push_back(desc);

    // A free continuous rhythm weighting was once exposed but is now
    // folded into the featureType values above; kept for reference
/*
    desc.identifier = "rhythmWeighting";
    desc.name = "Influence of Rhythm";
    desc.description = "Proportion of similarity measure made up from rhythmic similarity component, from 0 (entirely timbral or chromatic) to 100 (entirely rhythmic).";
    desc.unit = "%";
    desc.minValue = 0;
    desc.maxValue = 100;
    desc.defaultValue = 0;
    desc.isQuantized = false;
    desc.valueNames.clear();
    list.push_back(desc);
*/
    return list;
}
c@41
|
344
|
c@41
|
345 float
|
c@41
|
346 SimilarityPlugin::getParameter(std::string param) const
|
c@41
|
347 {
|
c@42
|
348 if (param == "featureType") {
|
c@48
|
349
|
c@48
|
350 if (m_rhythmWeighting > m_allRhythm) {
|
c@48
|
351 return 4;
|
c@48
|
352 }
|
c@48
|
353
|
c@48
|
354 switch (m_type) {
|
c@48
|
355
|
c@48
|
356 case TypeMFCC:
|
c@48
|
357 if (m_rhythmWeighting < m_noRhythm) return 0;
|
c@48
|
358 else return 1;
|
c@48
|
359 break;
|
c@48
|
360
|
c@48
|
361 case TypeChroma:
|
c@48
|
362 if (m_rhythmWeighting < m_noRhythm) return 2;
|
c@48
|
363 else return 3;
|
c@48
|
364 break;
|
c@48
|
365 }
|
c@48
|
366
|
c@48
|
367 return 1;
|
c@48
|
368
|
c@48
|
369 // } else if (param == "rhythmWeighting") {
|
c@48
|
370 // return nearbyint(m_rhythmWeighting * 100.0);
|
c@42
|
371 }
|
c@42
|
372
|
c@41
|
373 std::cerr << "WARNING: SimilarityPlugin::getParameter: unknown parameter \""
|
c@41
|
374 << param << "\"" << std::endl;
|
c@41
|
375 return 0.0;
|
c@41
|
376 }
|
c@41
|
377
|
c@41
|
378 void
|
c@41
|
379 SimilarityPlugin::setParameter(std::string param, float value)
|
c@41
|
380 {
|
c@42
|
381 if (param == "featureType") {
|
c@48
|
382
|
c@42
|
383 int v = int(value + 0.1);
|
c@48
|
384
|
c@48
|
385 Type newType = m_type;
|
c@48
|
386
|
c@48
|
387 switch (v) {
|
c@48
|
388 case 0: newType = TypeMFCC; m_rhythmWeighting = 0.0f; break;
|
c@48
|
389 case 1: newType = TypeMFCC; m_rhythmWeighting = 0.5f; break;
|
c@48
|
390 case 2: newType = TypeChroma; m_rhythmWeighting = 0.0f; break;
|
c@48
|
391 case 3: newType = TypeChroma; m_rhythmWeighting = 0.5f; break;
|
c@48
|
392 case 4: newType = TypeMFCC; m_rhythmWeighting = 1.f; break;
|
c@48
|
393 }
|
c@48
|
394
|
c@48
|
395 if (newType != m_type) m_blockSize = 0;
|
c@48
|
396
|
c@48
|
397 m_type = newType;
|
c@42
|
398 return;
|
c@48
|
399
|
c@48
|
400 // } else if (param == "rhythmWeighting") {
|
c@48
|
401 // m_rhythmWeighting = value / 100;
|
c@48
|
402 // return;
|
c@42
|
403 }
|
c@42
|
404
|
c@41
|
405 std::cerr << "WARNING: SimilarityPlugin::setParameter: unknown parameter \""
|
c@41
|
406 << param << "\"" << std::endl;
|
c@41
|
407 }
|
c@41
|
408
|
SimilarityPlugin::OutputList
SimilarityPlugin::getOutputDescriptors() const
{
    OutputList list;

    // Full N x N distance matrix, one row (vector of N bins) per
    // channel; the row index is encoded in the feature timestamp
    OutputDescriptor similarity;
    similarity.identifier = "distancematrix";
    similarity.name = "Distance Matrix";
    similarity.description = "Distance matrix for similarity metric. Smaller = more similar. Should be symmetrical.";
    similarity.unit = "";
    similarity.hasFixedBinCount = true;
    similarity.binCount = m_channels;
    similarity.hasKnownExtents = false;
    similarity.isQuantized = false;
    similarity.sampleType = OutputDescriptor::FixedSampleRate;
    similarity.sampleRate = 1;

    // Record the output indices as we build the list, so the feature
    // producers can address the right output
    m_distanceMatrixOutput = list.size();
    list.push_back(similarity);

    // Just the first row of the matrix: each channel's distance from
    // channel 1
    OutputDescriptor simvec;
    simvec.identifier = "distancevector";
    simvec.name = "Distance from First Channel";
    simvec.description = "Distance vector for similarity of each channel to the first channel. Smaller = more similar.";
    simvec.unit = "";
    simvec.hasFixedBinCount = true;
    simvec.binCount = m_channels;
    simvec.hasKnownExtents = false;
    simvec.isQuantized = false;
    simvec.sampleType = OutputDescriptor::FixedSampleRate;
    simvec.sampleRate = 1;

    m_distanceVectorOutput = list.size();
    list.push_back(simvec);

    // Two features on this output: a channel ordering, then the
    // corresponding sorted distances (see getRemainingFeatures)
    OutputDescriptor sortvec;
    sortvec.identifier = "sorteddistancevector";
    sortvec.name = "Ordered Distances from First Channel";
    sortvec.description = "Vector of the order of other channels in similarity to the first, followed by distance vector for similarity of each to the first. Smaller = more similar.";
    sortvec.unit = "";
    sortvec.hasFixedBinCount = true;
    sortvec.binCount = m_channels;
    sortvec.hasKnownExtents = false;
    sortvec.isQuantized = false;
    sortvec.sampleType = OutputDescriptor::FixedSampleRate;
    sortvec.sampleRate = 1;

    m_sortedVectorOutput = list.size();
    list.push_back(sortvec);

    // Per-channel feature means (bin count depends on feature type:
    // 20 for MFCC, 12 for chroma)
    OutputDescriptor means;
    means.identifier = "means";
    means.name = "Feature Means";
    means.description = "Means of the feature bins. Feature time (sec) corresponds to input channel. Number of bins depends on selected feature type.";
    means.unit = "";
    means.hasFixedBinCount = true;
    means.binCount = m_featureColumnSize;
    means.hasKnownExtents = false;
    means.isQuantized = false;
    means.sampleType = OutputDescriptor::FixedSampleRate;
    means.sampleRate = 1;

    m_meansOutput = list.size();
    list.push_back(means);

    // Per-channel feature variances, matching the means output
    OutputDescriptor variances;
    variances.identifier = "variances";
    variances.name = "Feature Variances";
    variances.description = "Variances of the feature bins. Feature time (sec) corresponds to input channel. Number of bins depends on selected feature type.";
    variances.unit = "";
    variances.hasFixedBinCount = true;
    variances.binCount = m_featureColumnSize;
    variances.hasKnownExtents = false;
    variances.isQuantized = false;
    variances.sampleType = OutputDescriptor::FixedSampleRate;
    variances.sampleRate = 1;

    m_variancesOutput = list.size();
    list.push_back(variances);

    // Beat spectra: bin count is only known once the rhythm clip
    // length has been established in initialise()
    OutputDescriptor beatspectrum;
    beatspectrum.identifier = "beatspectrum";
    beatspectrum.name = "Beat Spectra";
    beatspectrum.description = "Rhythmic self-similarity vectors (beat spectra) for the input channels. Feature time (sec) corresponds to input channel. Not returned if rhythm weighting is zero.";
    beatspectrum.unit = "";
    if (m_rhythmClipFrames > 0) {
        beatspectrum.hasFixedBinCount = true;
        beatspectrum.binCount = m_rhythmClipFrames / 2;
    } else {
        beatspectrum.hasFixedBinCount = false;
    }
    beatspectrum.hasKnownExtents = false;
    beatspectrum.isQuantized = false;
    beatspectrum.sampleType = OutputDescriptor::FixedSampleRate;
    beatspectrum.sampleRate = 1;

    m_beatSpectraOutput = list.size();
    list.push_back(beatspectrum);

    return list;
}
c@41
|
510
|
c@41
|
511 SimilarityPlugin::FeatureSet
|
c@41
|
512 SimilarityPlugin::process(const float *const *inputBuffers, Vamp::RealTime /* timestamp */)
|
c@41
|
513 {
|
c@47
|
514 if (m_done) {
|
c@47
|
515 return FeatureSet();
|
c@47
|
516 }
|
c@47
|
517
|
c@41
|
518 double *dblbuf = new double[m_blockSize];
|
c@41
|
519 double *decbuf = dblbuf;
|
c@42
|
520 if (m_decimator) decbuf = new double[m_fftSize];
|
c@42
|
521
|
c@47
|
522 double *raw = new double[std::max(m_featureColumnSize,
|
c@47
|
523 m_rhythmColumnSize)];
|
c@41
|
524
|
c@43
|
525 float threshold = 1e-10;
|
c@43
|
526
|
c@47
|
527 bool someRhythmFrameNeeded = false;
|
c@47
|
528
|
c@41
|
529 for (size_t c = 0; c < m_channels; ++c) {
|
c@41
|
530
|
c@43
|
531 bool empty = true;
|
c@43
|
532
|
c@41
|
533 for (int i = 0; i < m_blockSize; ++i) {
|
c@43
|
534 float val = inputBuffers[c][i];
|
c@43
|
535 if (fabs(val) > threshold) empty = false;
|
c@43
|
536 dblbuf[i] = val;
|
c@41
|
537 }
|
c@41
|
538
|
c@47
|
539 if (empty) {
|
c@47
|
540 if (needRhythm() && ((m_frameNo % 2) == 0)) {
|
c@47
|
541 for (int i = 0; i < m_fftSize / m_rhythmClipFrameSize; ++i) {
|
c@47
|
542 if (m_rhythmValues[c].size() < m_rhythmClipFrames) {
|
c@47
|
543 FeatureColumn mf(m_rhythmColumnSize);
|
c@47
|
544 for (int i = 0; i < m_rhythmColumnSize; ++i) {
|
c@47
|
545 mf[i] = 0.0;
|
c@47
|
546 }
|
c@47
|
547 m_rhythmValues[c].push_back(mf);
|
c@47
|
548 }
|
c@47
|
549 }
|
c@47
|
550 }
|
c@60
|
551 m_emptyFrameCount[c]++;
|
c@47
|
552 continue;
|
c@47
|
553 }
|
c@47
|
554
|
c@44
|
555 m_lastNonEmptyFrame[c] = m_frameNo;
|
c@43
|
556
|
c@41
|
557 if (m_decimator) {
|
c@41
|
558 m_decimator->process(dblbuf, decbuf);
|
c@41
|
559 }
|
c@42
|
560
|
c@47
|
561 if (needTimbre()) {
|
c@47
|
562
|
c@47
|
563 if (m_type == TypeMFCC) {
|
c@47
|
564 m_mfcc->process(decbuf, raw);
|
c@47
|
565 } else if (m_type == TypeChroma) {
|
c@47
|
566 raw = m_chromagram->process(decbuf);
|
c@47
|
567 }
|
c@41
|
568
|
c@47
|
569 FeatureColumn mf(m_featureColumnSize);
|
c@47
|
570 for (int i = 0; i < m_featureColumnSize; ++i) {
|
c@47
|
571 mf[i] = raw[i];
|
c@47
|
572 }
|
c@47
|
573
|
c@47
|
574 m_values[c].push_back(mf);
|
c@44
|
575 }
|
c@41
|
576
|
c@47
|
577 // std::cerr << "needRhythm = " << needRhythm() << ", frame = " << m_frameNo << std::endl;
|
c@47
|
578
|
c@47
|
579 if (needRhythm() && ((m_frameNo % 2) == 0)) {
|
c@47
|
580
|
c@47
|
581 // The incoming frames are overlapping; we only use every
|
c@47
|
582 // other one, because we don't want the overlap (it would
|
c@47
|
583 // screw up the rhythm)
|
c@47
|
584
|
c@47
|
585 int frameOffset = 0;
|
c@47
|
586
|
c@47
|
587 while (frameOffset + m_rhythmClipFrameSize <= m_fftSize) {
|
c@47
|
588
|
c@47
|
589 bool needRhythmFrame = true;
|
c@47
|
590
|
c@47
|
591 if (m_rhythmValues[c].size() >= m_rhythmClipFrames) {
|
c@47
|
592
|
c@47
|
593 needRhythmFrame = false;
|
c@47
|
594
|
c@47
|
595 // assumes hopsize = framesize/2
|
c@47
|
596 float current = m_frameNo * (m_fftSize/2) + frameOffset;
|
c@47
|
597 current = current / m_processRate;
|
c@47
|
598 if (current - m_rhythmClipDuration < m_rhythmClipOrigin) {
|
c@47
|
599 needRhythmFrame = true;
|
c@47
|
600 m_rhythmValues[c].pop_front();
|
c@47
|
601 }
|
c@47
|
602
|
c@53
|
603 // if (needRhythmFrame) {
|
c@53
|
604 // std::cerr << "at current = " <<current << " (frame = " << m_frameNo << "), have " << m_rhythmValues[c].size() << ", need rhythm = " << needRhythmFrame << std::endl;
|
c@53
|
605 // }
|
c@47
|
606
|
c@47
|
607 }
|
c@47
|
608
|
c@47
|
609 if (needRhythmFrame) {
|
c@47
|
610
|
c@47
|
611 someRhythmFrameNeeded = true;
|
c@47
|
612
|
c@47
|
613 m_rhythmfcc->process(decbuf + frameOffset, raw);
|
c@47
|
614
|
c@47
|
615 FeatureColumn mf(m_rhythmColumnSize);
|
c@47
|
616 for (int i = 0; i < m_rhythmColumnSize; ++i) {
|
c@47
|
617 mf[i] = raw[i];
|
c@47
|
618 }
|
c@47
|
619
|
c@47
|
620 m_rhythmValues[c].push_back(mf);
|
c@47
|
621 }
|
c@47
|
622
|
c@47
|
623 frameOffset += m_rhythmClipFrameSize;
|
c@47
|
624 }
|
c@47
|
625 }
|
c@47
|
626 }
|
c@47
|
627
|
c@47
|
628 if (!needTimbre() && !someRhythmFrameNeeded && ((m_frameNo % 2) == 0)) {
|
c@53
|
629 // std::cerr << "done!" << std::endl;
|
c@47
|
630 m_done = true;
|
c@41
|
631 }
|
c@41
|
632
|
c@41
|
633 if (m_decimator) delete[] decbuf;
|
c@41
|
634 delete[] dblbuf;
|
c@47
|
635 delete[] raw;
|
c@41
|
636
|
c@44
|
637 ++m_frameNo;
|
c@44
|
638
|
c@41
|
639 return FeatureSet();
|
c@41
|
640 }
|
c@41
|
641
|
// Compute per-channel means and variances of the accumulated feature
// columns, emit them on the means/variances outputs, and return the
// channel-to-channel distance matrix (KL divergence based).
SimilarityPlugin::FeatureMatrix
SimilarityPlugin::calculateTimbral(FeatureSet &returnFeatures)
{
    FeatureMatrix m(m_channels); // means
    FeatureMatrix v(m_channels); // variances

    for (int i = 0; i < m_channels; ++i) {

        FeatureColumn mean(m_featureColumnSize), variance(m_featureColumnSize);

        for (int j = 0; j < m_featureColumnSize; ++j) {

            mean[j] = 0.0;
            variance[j] = 0.0;
            int count;

            // We want to take values up to, but not including, the
            // last non-empty frame (which may be partial)

            // m_lastNonEmptyFrame counts all input frames, but
            // m_values only holds columns for non-empty ones, so
            // subtract the empties; then clamp to the stored range
            int sz = m_lastNonEmptyFrame[i] - m_emptyFrameCount[i];
            if (sz < 0) sz = 0;
            if (sz >= m_values[i].size()) sz = m_values[i].size()-1;

            // Mean over valid (finite) values only
            count = 0;
            for (int k = 0; k < sz; ++k) {
                double val = m_values[i][k][j];
                if (isnan(val) || isinf(val)) continue;
                mean[j] += val;
                ++count;
            }
            if (count > 0) mean[j] /= count;

            // Population variance about that mean, again skipping
            // non-finite values
            count = 0;
            for (int k = 0; k < sz; ++k) {
                double val = ((m_values[i][k][j] - mean[j]) *
                              (m_values[i][k][j] - mean[j]));
                if (isnan(val) || isinf(val)) continue;
                variance[j] += val;
                ++count;
            }
            if (count > 0) variance[j] /= count;
        }

        m[i] = mean;
        v[i] = variance;
    }

    FeatureMatrix distances(m_channels);

    if (m_type == TypeMFCC) {

        // "Despite the fact that MFCCs extracted from music are
        // clearly not Gaussian, [14] showed, somewhat surprisingly,
        // that a similarity function comparing single Gaussians
        // modelling MFCCs for each track can perform as well as
        // mixture models. A great advantage of using single
        // Gaussians is that a simple closed form exists for the KL
        // divergence." -- Mark Levy, "Lightweight measures for
        // timbral similarity of musical audio"
        // (http://www.elec.qmul.ac.uk/easaier/papers/mlevytimbralsimilarity.pdf)

        KLDivergence kld;

        for (int i = 0; i < m_channels; ++i) {
            for (int j = 0; j < m_channels; ++j) {
                double d = kld.distanceGaussian(m[i], v[i], m[j], v[j]);
                distances[i].push_back(d);
            }
        }

    } else {

        // We use the KL divergence for distributions of discrete
        // variables, as chroma are histograms already. Or at least,
        // they will be when we've normalised them like this:
        for (int i = 0; i < m_channels; ++i) {
            MathUtilities::normalise(m[i], MathUtilities::NormaliseUnitSum);
        }

        KLDivergence kld;

        for (int i = 0; i < m_channels; ++i) {
            for (int j = 0; j < m_channels; ++j) {
                double d = kld.distanceDistribution(m[i], m[j], true);
                distances[i].push_back(d);
            }
        }
    }

    // Emit the means and variances themselves, one feature per
    // channel, timestamped at the channel index in seconds
    Feature feature;
    feature.hasTimestamp = true;

    char labelBuffer[100];

    for (int i = 0; i < m_channels; ++i) {

        feature.timestamp = Vamp::RealTime(i, 0);

        sprintf(labelBuffer, "Means for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int k = 0; k < m_featureColumnSize; ++k) {
            feature.values.push_back(m[i][k]);
        }

        returnFeatures[m_meansOutput].push_back(feature);

        sprintf(labelBuffer, "Variances for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int k = 0; k < m_featureColumnSize; ++k) {
            feature.values.push_back(v[i][k]);
        }

        returnFeatures[m_variancesOutput].push_back(feature);
    }

    return distances;
}
c@47
|
763
|
// Compute each channel's beat spectrum from its accumulated rhythm
// clip, emit the spectra on the beat-spectra output, and return the
// channel-to-channel cosine distance matrix.
SimilarityPlugin::FeatureMatrix
SimilarityPlugin::calculateRhythmic(FeatureSet &returnFeatures)
{
    if (!needRhythm()) return FeatureMatrix();

    BeatSpectrum bscalc;
    CosineDistance cd;

    // Our rhythm feature matrix is a deque of vectors for practical
    // reasons, but BeatSpectrum::process wants a vector of vectors
    // (which is what FeatureMatrix happens to be).

    FeatureMatrixSet bsinput(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        for (int j = 0; j < m_rhythmValues[i].size(); ++j) {
            bsinput[i].push_back(m_rhythmValues[i][j]);
        }
    }

    // One beat spectrum (rhythmic self-similarity vector) per channel
    FeatureMatrix bs(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        bs[i] = bscalc.process(bsinput[i]);
    }

    // Pairwise cosine distances between the beat spectra
    FeatureMatrix distances(m_channels);
    for (int i = 0; i < m_channels; ++i) {
        for (int j = 0; j < m_channels; ++j) {
            double d = cd.distance(bs[i], bs[j]);
            distances[i].push_back(d);
        }
    }

    // Also emit the spectra themselves, timestamped at the channel
    // index in seconds
    Feature feature;
    feature.hasTimestamp = true;

    char labelBuffer[100];

    for (int i = 0; i < m_channels; ++i) {

        feature.timestamp = Vamp::RealTime(i, 0);

        sprintf(labelBuffer, "Beat spectrum for channel %d", i+1);
        feature.label = labelBuffer;

        feature.values.clear();
        for (int j = 0; j < bs[i].size(); ++j) {
            feature.values.push_back(bs[i][j]);
        }

        returnFeatures[m_beatSpectraOutput].push_back(feature);
    }

    return distances;
}
c@47
|
825
|
c@47
|
826 double
|
c@47
|
827 SimilarityPlugin::getDistance(const FeatureMatrix &timbral,
|
c@47
|
828 const FeatureMatrix &rhythmic,
|
c@47
|
829 int i, int j)
|
c@47
|
830 {
|
c@47
|
831 double distance = 1.0;
|
c@47
|
832 if (needTimbre()) distance *= timbral[i][j];
|
c@47
|
833 if (needRhythm()) distance *= rhythmic[i][j];
|
c@47
|
834 return distance;
|
c@47
|
835 }
|
c@47
|
836
|
c@47
|
837 SimilarityPlugin::FeatureSet
|
c@47
|
838 SimilarityPlugin::getRemainingFeatures()
|
c@47
|
839 {
|
c@47
|
840 FeatureSet returnFeatures;
|
c@47
|
841
|
c@47
|
842 // We want to return a matrix of the distances between channels,
|
c@47
|
843 // but Vamp doesn't have a matrix return type so we will actually
|
c@47
|
844 // return a series of vectors
|
c@47
|
845
|
c@47
|
846 FeatureMatrix timbralDistances, rhythmicDistances;
|
c@47
|
847
|
c@47
|
848 if (needTimbre()) {
|
c@47
|
849 timbralDistances = calculateTimbral(returnFeatures);
|
c@47
|
850 }
|
c@47
|
851
|
c@47
|
852 if (needRhythm()) {
|
c@47
|
853 rhythmicDistances = calculateRhythmic(returnFeatures);
|
c@47
|
854 }
|
c@47
|
855
|
c@47
|
856 // We give all features a timestamp, otherwise hosts will tend to
|
c@47
|
857 // stamp them at the end of the file, which is annoying
|
c@47
|
858
|
c@47
|
859 Feature feature;
|
c@47
|
860 feature.hasTimestamp = true;
|
c@47
|
861
|
c@47
|
862 Feature distanceVectorFeature;
|
c@47
|
863 distanceVectorFeature.label = "Distance from first channel";
|
c@47
|
864 distanceVectorFeature.hasTimestamp = true;
|
c@47
|
865 distanceVectorFeature.timestamp = Vamp::RealTime::zeroTime;
|
c@47
|
866
|
c@47
|
867 std::map<double, int> sorted;
|
c@47
|
868
|
c@47
|
869 char labelBuffer[100];
|
c@47
|
870
|
c@47
|
871 for (int i = 0; i < m_channels; ++i) {
|
c@47
|
872
|
c@47
|
873 feature.timestamp = Vamp::RealTime(i, 0);
|
c@41
|
874
|
c@41
|
875 feature.values.clear();
|
c@41
|
876 for (int j = 0; j < m_channels; ++j) {
|
c@47
|
877 double dist = getDistance(timbralDistances, rhythmicDistances, i, j);
|
c@47
|
878 feature.values.push_back(dist);
|
c@41
|
879 }
|
c@43
|
880
|
c@44
|
881 sprintf(labelBuffer, "Distances from channel %d", i+1);
|
c@44
|
882 feature.label = labelBuffer;
|
c@41
|
883
|
c@43
|
884 returnFeatures[m_distanceMatrixOutput].push_back(feature);
|
c@43
|
885
|
c@47
|
886 double fromFirst =
|
c@47
|
887 getDistance(timbralDistances, rhythmicDistances, 0, i);
|
c@44
|
888
|
c@47
|
889 distanceVectorFeature.values.push_back(fromFirst);
|
c@47
|
890 sorted[fromFirst] = i;
|
c@41
|
891 }
|
c@41
|
892
|
c@43
|
893 returnFeatures[m_distanceVectorOutput].push_back(distanceVectorFeature);
|
c@43
|
894
|
c@44
|
895 feature.label = "Order of channels by similarity to first channel";
|
c@44
|
896 feature.values.clear();
|
c@44
|
897 feature.timestamp = Vamp::RealTime(0, 0);
|
c@44
|
898
|
c@44
|
899 for (std::map<double, int>::iterator i = sorted.begin();
|
c@44
|
900 i != sorted.end(); ++i) {
|
c@45
|
901 feature.values.push_back(i->second + 1);
|
c@44
|
902 }
|
c@44
|
903
|
c@44
|
904 returnFeatures[m_sortedVectorOutput].push_back(feature);
|
c@44
|
905
|
c@44
|
906 feature.label = "Ordered distances of channels from first channel";
|
c@44
|
907 feature.values.clear();
|
c@44
|
908 feature.timestamp = Vamp::RealTime(1, 0);
|
c@44
|
909
|
c@44
|
910 for (std::map<double, int>::iterator i = sorted.begin();
|
c@44
|
911 i != sorted.end(); ++i) {
|
c@44
|
912 feature.values.push_back(i->first);
|
c@44
|
913 }
|
c@44
|
914
|
c@44
|
915 returnFeatures[m_sortedVectorOutput].push_back(feature);
|
c@44
|
916
|
c@41
|
917 return returnFeatures;
|
c@41
|
918 }
|