Mercurial > hg > svcore
comparison transform/FeatureExtractionModelTransformer.cpp @ 388:370aa9714ef5
* Move plugin/transform to plain transform. This way transform can depend on
model and GUI classes, but plugin doesn't have to.
author | Chris Cannam |
---|---|
date | Wed, 12 Mar 2008 18:02:17 +0000 |
parents | plugin/transform/FeatureExtractionModelTransformer.cpp@7aa1de571880 |
children | 115f60df1e4d |
comparison
equal
deleted
inserted
replaced
387:7aa1de571880 | 388:370aa9714ef5 |
---|---|
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ | |
2 | |
3 /* | |
4 Sonic Visualiser | |
5 An audio file viewer and annotation editor. | |
6 Centre for Digital Music, Queen Mary, University of London. | |
7 This file copyright 2006 Chris Cannam and QMUL. | |
8 | |
9 This program is free software; you can redistribute it and/or | |
10 modify it under the terms of the GNU General Public License as | |
11 published by the Free Software Foundation; either version 2 of the | |
12 License, or (at your option) any later version. See the file | |
13 COPYING included with this distribution for more information. | |
14 */ | |
15 | |
16 #include "FeatureExtractionModelTransformer.h" | |
17 | |
18 #include "plugin/FeatureExtractionPluginFactory.h" | |
19 #include "plugin/PluginXml.h" | |
20 #include "vamp-sdk/Plugin.h" | |
21 | |
22 #include "data/model/Model.h" | |
23 #include "base/Window.h" | |
24 #include "base/Exceptions.h" | |
25 #include "data/model/SparseOneDimensionalModel.h" | |
26 #include "data/model/SparseTimeValueModel.h" | |
27 #include "data/model/EditableDenseThreeDimensionalModel.h" | |
28 #include "data/model/DenseTimeValueModel.h" | |
29 #include "data/model/NoteModel.h" | |
30 #include "data/model/FFTModel.h" | |
31 #include "data/model/WaveFileModel.h" | |
32 | |
33 #include "TransformFactory.h" | |
34 | |
35 #include <iostream> | |
36 | |
FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
                                                                     const Transform &transform) :
    ModelTransformer(in, transform),
    m_plugin(0),
    m_descriptor(0),
    m_outputFeatureNo(0)
{
    // Locate and instantiate the feature extraction plugin named by the
    // transform, initialise it against the input model, select the
    // requested plugin output, and create an output model whose type
    // matches that output's shape (bin count / sample type).  On any
    // failure m_message is set and construction returns early, leaving
    // m_output null (run() checks for that).

//    std::cerr << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId.toStdString() << ", outputName " << m_transform.getOutput().toStdString() << std::endl;

    QString pluginId = transform.getPluginIdentifier();

    FeatureExtractionPluginFactory *factory =
        FeatureExtractionPluginFactory::instanceFor(pluginId);

    if (!factory) {
        m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId);
        return;
    }

    // The transformer only operates on dense time-value (sample) input.
    DenseTimeValueModel *input = getConformingInput();
    if (!input) {
        m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId);
        return;
    }

    m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate());
    if (!m_plugin) {
        m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId);
        return;
    }

    // Fill in any step/block/window defaults the transform left unset,
    // then push the transform's parameter values into the plugin.
    TransformFactory::getInstance()->makeContextConsistentWithPlugin
        (m_transform, m_plugin);

    TransformFactory::getInstance()->setPluginParameters
        (m_transform, m_plugin);

    // If the plugin cannot accept all of the input's channels, fall back
    // to feeding it a single (mixed-down) channel -- see run()/getFrames.
    size_t channelCount = input->getChannelCount();
    if (m_plugin->getMaxChannelCount() < channelCount) {
        channelCount = 1;
    }
    if (m_plugin->getMinChannelCount() > channelCount) {
        m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)")
            .arg(pluginId)
            .arg(m_plugin->getMinChannelCount())
            .arg(m_plugin->getMaxChannelCount())
            .arg(input->getChannelCount());
        return;
    }

    std::cerr << "Initialising feature extraction plugin with channels = "
              << channelCount << ", step = " << m_transform.getStepSize()
              << ", block = " << m_transform.getBlockSize() << std::endl;

    if (!m_plugin->initialise(channelCount,
                              m_transform.getStepSize(),
                              m_transform.getBlockSize())) {

        // The plugin rejected the requested step/block sizes.  Reset
        // them to zero so that makeContextConsistentWithPlugin restores
        // the plugin's own preferred sizes, then retry once with those.
        size_t pstep = m_transform.getStepSize();
        size_t pblock = m_transform.getBlockSize();

        m_transform.setStepSize(0);
        m_transform.setBlockSize(0);
        TransformFactory::getInstance()->makeContextConsistentWithPlugin
            (m_transform, m_plugin);

        if (m_transform.getStepSize() != pstep ||
            m_transform.getBlockSize() != pblock) {

            if (!m_plugin->initialise(channelCount,
                                      m_transform.getStepSize(),
                                      m_transform.getBlockSize())) {

                m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
                return;

            } else {

                // Succeeded with the plugin defaults; record a warning
                // (not an error) so the user knows sizes were changed.
                m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead")
                    .arg(pluginId)
                    .arg(pstep)
                    .arg(pblock)
                    .arg(m_transform.getStepSize())
                    .arg(m_transform.getBlockSize());
            }

        } else {

            // The preferred sizes are the same ones that just failed:
            // no point retrying.
            m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
            return;
        }
    }

    // Warn (without failing) if the transform was saved against a
    // different version of this plugin.  Any earlier warning message is
    // preserved by chaining.
    if (m_transform.getPluginVersion() != "") {
        QString pv = QString("%1").arg(m_plugin->getPluginVersion());
        if (pv != m_transform.getPluginVersion()) {
            QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3")
                .arg(m_transform.getPluginVersion())
                .arg(pluginId)
                .arg(pv);
            if (m_message != "") {
                m_message = QString("%1; %2").arg(vm).arg(m_message);
            } else {
                m_message = vm;
            }
        }
    }

    Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();

    if (outputs.empty()) {
        m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId);
        return;
    }

    // Select the requested output by identifier; an empty identifier in
    // the transform means "use the first output".
    for (size_t i = 0; i < outputs.size(); ++i) {
        if (m_transform.getOutput() == "" ||
            outputs[i].identifier == m_transform.getOutput().toStdString()) {
            m_outputFeatureNo = i;
            m_descriptor = new Vamp::Plugin::OutputDescriptor
                (outputs[i]);
            break;
        }
    }

    if (!m_descriptor) {
        m_message = tr("Plugin \"%1\" has no output named \"%2\"")
            .arg(pluginId)
            .arg(m_transform.getOutput());
        return;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output sample type "
//	      << m_descriptor->sampleType << std::endl;

    // Work out the shape of the output: bin count and value extents.
    int binCount = 1;
    float minValue = 0.0, maxValue = 0.0;
    bool haveExtents = false;

    if (m_descriptor->hasFixedBinCount) {
        binCount = m_descriptor->binCount;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output bin count "
//	      << binCount << std::endl;

    if (binCount > 0 && m_descriptor->hasKnownExtents) {
        minValue = m_descriptor->minValue;
        maxValue = m_descriptor->maxValue;
        haveExtents = true;
    }

    // Derive the output model's rate and resolution from the output's
    // sample type (see the Vamp SDK OutputDescriptor documentation).
    size_t modelRate = input->getSampleRate();
    size_t modelResolution = 1;

    switch (m_descriptor->sampleType) {

    case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
        if (m_descriptor->sampleRate != 0.0) {
            // sampleRate here is a resolution hint; +0.001 guards
            // against floating-point truncation when converting.
            modelResolution = size_t(modelRate / m_descriptor->sampleRate + 0.001);
        }
        break;

    case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
        modelResolution = m_transform.getStepSize();
        break;

    case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
        modelRate = size_t(m_descriptor->sampleRate + 0.001);
        break;
    }

    // Choose the output model type:
    //   0 bins                      -> instants only (sparse 1D)
    //   1 bin                       -> time/value pairs
    //   >1 bins, variable rate      -> interpreted as notes (see below)
    //   >1 bins otherwise           -> dense 3D grid
    if (binCount == 0) {

        m_output = new SparseOneDimensionalModel(modelRate, modelResolution,
                                                 false);

    } else if (binCount == 1) {

        SparseTimeValueModel *model;
        if (haveExtents) {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, false);
        }
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        // We don't have a sparse 3D model, so interpret this as a
        // note model.  There's nothing to define which values to use
        // as which parameters of the note -- for the moment let's
        // treat the first as pitch, second as duration in frames,
        // third (if present) as velocity.  (Our note model doesn't
        // yet store velocity.)
        //!!! todo: ask the user!

        NoteModel *model;
        if (haveExtents) {
            model = new NoteModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new NoteModel
                (modelRate, modelResolution, false);
        }
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else {

        EditableDenseThreeDimensionalModel *model =
            new EditableDenseThreeDimensionalModel
            (modelRate, modelResolution, binCount, false);

        if (!m_descriptor->binNames.empty()) {
            std::vector<QString> names;
            for (size_t i = 0; i < m_descriptor->binNames.size(); ++i) {
                names.push_back(m_descriptor->binNames[i].c_str());
            }
            model->setBinNames(names);
        }

        m_output = model;
    }

    if (m_output) m_output->setSourceModel(input);
}
270 | |
271 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer() | |
272 { | |
273 std::cerr << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << std::endl; | |
274 delete m_plugin; | |
275 delete m_descriptor; | |
276 } | |
277 | |
278 DenseTimeValueModel * | |
279 FeatureExtractionModelTransformer::getConformingInput() | |
280 { | |
281 DenseTimeValueModel *dtvm = | |
282 dynamic_cast<DenseTimeValueModel *>(getInputModel()); | |
283 if (!dtvm) { | |
284 std::cerr << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl; | |
285 } | |
286 return dtvm; | |
287 } | |
288 | |
289 void | |
290 FeatureExtractionModelTransformer::run() | |
291 { | |
292 DenseTimeValueModel *input = getConformingInput(); | |
293 if (!input) return; | |
294 | |
295 if (!m_output) return; | |
296 | |
297 while (!input->isReady()) { | |
298 /* | |
299 if (dynamic_cast<WaveFileModel *>(input)) { | |
300 std::cerr << "FeatureExtractionModelTransformer::run: Model is not ready, but it's not a WaveFileModel (it's a " << typeid(input).name() << "), so that's OK" << std::endl; | |
301 sleep(2); | |
302 break; // no need to wait | |
303 } | |
304 */ | |
305 std::cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << std::endl; | |
306 sleep(1); | |
307 } | |
308 | |
309 size_t sampleRate = input->getSampleRate(); | |
310 | |
311 size_t channelCount = input->getChannelCount(); | |
312 if (m_plugin->getMaxChannelCount() < channelCount) { | |
313 channelCount = 1; | |
314 } | |
315 | |
316 float **buffers = new float*[channelCount]; | |
317 for (size_t ch = 0; ch < channelCount; ++ch) { | |
318 buffers[ch] = new float[m_transform.getBlockSize() + 2]; | |
319 } | |
320 | |
321 size_t stepSize = m_transform.getStepSize(); | |
322 size_t blockSize = m_transform.getBlockSize(); | |
323 | |
324 bool frequencyDomain = (m_plugin->getInputDomain() == | |
325 Vamp::Plugin::FrequencyDomain); | |
326 std::vector<FFTModel *> fftModels; | |
327 | |
328 if (frequencyDomain) { | |
329 for (size_t ch = 0; ch < channelCount; ++ch) { | |
330 FFTModel *model = new FFTModel | |
331 (getConformingInput(), | |
332 channelCount == 1 ? m_input.getChannel() : ch, | |
333 m_transform.getWindowType(), | |
334 blockSize, | |
335 stepSize, | |
336 blockSize, | |
337 false, | |
338 StorageAdviser::PrecisionCritical); | |
339 if (!model->isOK()) { | |
340 delete model; | |
341 setCompletion(100); | |
342 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either | |
343 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer"); | |
344 } | |
345 model->resume(); | |
346 fftModels.push_back(model); | |
347 } | |
348 } | |
349 | |
350 long startFrame = m_input.getModel()->getStartFrame(); | |
351 long endFrame = m_input.getModel()->getEndFrame(); | |
352 | |
353 RealTime contextStartRT = m_transform.getStartTime(); | |
354 RealTime contextDurationRT = m_transform.getDuration(); | |
355 | |
356 long contextStart = | |
357 RealTime::realTime2Frame(contextStartRT, sampleRate); | |
358 | |
359 long contextDuration = | |
360 RealTime::realTime2Frame(contextDurationRT, sampleRate); | |
361 | |
362 if (contextStart == 0 || contextStart < startFrame) { | |
363 contextStart = startFrame; | |
364 } | |
365 | |
366 if (contextDuration == 0) { | |
367 contextDuration = endFrame - contextStart; | |
368 } | |
369 if (contextStart + contextDuration > endFrame) { | |
370 contextDuration = endFrame - contextStart; | |
371 } | |
372 | |
373 long blockFrame = contextStart; | |
374 | |
375 long prevCompletion = 0; | |
376 | |
377 setCompletion(0); | |
378 | |
379 while (!m_abandoned) { | |
380 | |
381 if (frequencyDomain) { | |
382 if (blockFrame - int(blockSize)/2 > | |
383 contextStart + contextDuration) break; | |
384 } else { | |
385 if (blockFrame >= | |
386 contextStart + contextDuration) break; | |
387 } | |
388 | |
389 // std::cerr << "FeatureExtractionModelTransformer::run: blockFrame " | |
390 // << blockFrame << ", endFrame " << endFrame << ", blockSize " | |
391 // << blockSize << std::endl; | |
392 | |
393 long completion = | |
394 (((blockFrame - contextStart) / stepSize) * 99) / | |
395 (contextDuration / stepSize); | |
396 | |
397 // channelCount is either m_input.getModel()->channelCount or 1 | |
398 | |
399 if (frequencyDomain) { | |
400 for (size_t ch = 0; ch < channelCount; ++ch) { | |
401 int column = (blockFrame - startFrame) / stepSize; | |
402 for (size_t i = 0; i <= blockSize/2; ++i) { | |
403 fftModels[ch]->getValuesAt | |
404 (column, i, buffers[ch][i*2], buffers[ch][i*2+1]); | |
405 } | |
406 } | |
407 } else { | |
408 getFrames(channelCount, blockFrame, blockSize, buffers); | |
409 } | |
410 | |
411 Vamp::Plugin::FeatureSet features = m_plugin->process | |
412 (buffers, Vamp::RealTime::frame2RealTime(blockFrame, sampleRate)); | |
413 | |
414 for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) { | |
415 Vamp::Plugin::Feature feature = | |
416 features[m_outputFeatureNo][fi]; | |
417 addFeature(blockFrame, feature); | |
418 } | |
419 | |
420 if (blockFrame == contextStart || completion > prevCompletion) { | |
421 setCompletion(completion); | |
422 prevCompletion = completion; | |
423 } | |
424 | |
425 blockFrame += stepSize; | |
426 } | |
427 | |
428 if (m_abandoned) return; | |
429 | |
430 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures(); | |
431 | |
432 for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) { | |
433 Vamp::Plugin::Feature feature = | |
434 features[m_outputFeatureNo][fi]; | |
435 addFeature(blockFrame, feature); | |
436 } | |
437 | |
438 if (frequencyDomain) { | |
439 for (size_t ch = 0; ch < channelCount; ++ch) { | |
440 delete fftModels[ch]; | |
441 } | |
442 } | |
443 | |
444 setCompletion(100); | |
445 } | |
446 | |
447 void | |
448 FeatureExtractionModelTransformer::getFrames(int channelCount, | |
449 long startFrame, long size, | |
450 float **buffers) | |
451 { | |
452 long offset = 0; | |
453 | |
454 if (startFrame < 0) { | |
455 for (int c = 0; c < channelCount; ++c) { | |
456 for (int i = 0; i < size && startFrame + i < 0; ++i) { | |
457 buffers[c][i] = 0.0f; | |
458 } | |
459 } | |
460 offset = -startFrame; | |
461 size -= offset; | |
462 if (size <= 0) return; | |
463 startFrame = 0; | |
464 } | |
465 | |
466 DenseTimeValueModel *input = getConformingInput(); | |
467 if (!input) return; | |
468 | |
469 long got = 0; | |
470 | |
471 if (channelCount == 1) { | |
472 | |
473 got = input->getData(m_input.getChannel(), startFrame, size, | |
474 buffers[0] + offset); | |
475 | |
476 if (m_input.getChannel() == -1 && input->getChannelCount() > 1) { | |
477 // use mean instead of sum, as plugin input | |
478 float cc = float(input->getChannelCount()); | |
479 for (long i = 0; i < size; ++i) { | |
480 buffers[0][i + offset] /= cc; | |
481 } | |
482 } | |
483 | |
484 } else { | |
485 | |
486 float **writebuf = buffers; | |
487 if (offset > 0) { | |
488 writebuf = new float *[channelCount]; | |
489 for (int i = 0; i < channelCount; ++i) { | |
490 writebuf[i] = buffers[i] + offset; | |
491 } | |
492 } | |
493 | |
494 got = input->getData(0, channelCount-1, startFrame, size, writebuf); | |
495 | |
496 if (writebuf != buffers) delete[] writebuf; | |
497 } | |
498 | |
499 while (got < size) { | |
500 for (int c = 0; c < channelCount; ++c) { | |
501 buffers[c][got + offset] = 0.0; | |
502 } | |
503 ++got; | |
504 } | |
505 } | |
506 | |
void
FeatureExtractionModelTransformer::addFeature(size_t blockFrame,
                                              const Vamp::Plugin::Feature &feature)
{
    // Convert one feature returned by the plugin into a point (or a
    // column) on the output model.  The frame position comes from the
    // block frame, the feature's own timestamp, or the end of the model,
    // depending on the output's sample type; the model type dispatch
    // mirrors the selection made in the constructor.

    size_t inputRate = m_input.getModel()->getSampleRate();

//    std::cerr << "FeatureExtractionModelTransformer::addFeature("
//	      << blockFrame << ")" << std::endl;

    int binCount = 1;
    if (m_descriptor->hasFixedBinCount) {
        binCount = m_descriptor->binCount;
    }

    size_t frame = blockFrame;

    if (m_descriptor->sampleType ==
        Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        // Variable-rate outputs must timestamp their own features;
        // without a timestamp we cannot place the feature at all.
        if (!feature.hasTimestamp) {
            std::cerr
                << "WARNING: FeatureExtractionModelTransformer::addFeature: "
                << "Feature has variable sample rate but no timestamp!"
                << std::endl;
            return;
        } else {
            frame = Vamp::RealTime::realTime2Frame(feature.timestamp, inputRate);
        }

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::FixedSampleRate) {

        if (feature.hasTimestamp) {
            //!!! warning: sampleRate may be non-integral
            frame = Vamp::RealTime::realTime2Frame(feature.timestamp,
                                                   lrintf(m_descriptor->sampleRate));
        } else {
            // No timestamp: features at a fixed rate follow on from the
            // previous one, i.e. at the current end of the output model.
            frame = m_output->getEndFrame();
        }
    }

    if (binCount == 0) {

        // Instants only: just a time and a label.
        SparseOneDimensionalModel *model =
            getConformingOutput<SparseOneDimensionalModel>();
        if (!model) return;

        model->addPoint(SparseOneDimensionalModel::Point(frame, feature.label.c_str()));

    } else if (binCount == 1) {

        // Single value per feature; missing value defaults to 0.
        float value = 0.0;
        if (feature.values.size() > 0) value = feature.values[0];

        SparseTimeValueModel *model =
            getConformingOutput<SparseTimeValueModel>();
        if (!model) return;

        model->addPoint(SparseTimeValueModel::Point(frame, value, feature.label.c_str()));
//	std::cerr << "SparseTimeValueModel::addPoint(" << frame << ", " << value << "), " << feature.label.c_str() << std::endl;

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        // Note interpretation (see constructor): value 0 = pitch,
        // value 1 = duration in frames, value 2 = velocity (0-127).
        float pitch = 0.0;
        if (feature.values.size() > 0) pitch = feature.values[0];

        float duration = 1;
        if (feature.values.size() > 1) duration = feature.values[1];

        float velocity = 100;
        if (feature.values.size() > 2) velocity = feature.values[2];
        // NOTE(review): a negative velocity is mapped to full velocity
        // (127) rather than clamped to 0 -- presumably "unspecified"
        // means "loudest"; confirm this is intentional.
        if (velocity < 0) velocity = 127;
        if (velocity > 127) velocity = 127;

        NoteModel *model = getConformingOutput<NoteModel>();
        if (!model) return;

        // Velocity is stored normalised to [0,1].
        model->addPoint(NoteModel::Point(frame, pitch,
                                         lrintf(duration),
                                         velocity / 127.f,
                                         feature.label.c_str()));

    } else {

        // Dense output: the whole value vector becomes one grid column.
        DenseThreeDimensionalModel::Column values = feature.values;

        EditableDenseThreeDimensionalModel *model =
            getConformingOutput<EditableDenseThreeDimensionalModel>();
        if (!model) return;

        model->setColumn(frame / model->getResolution(), values);
    }
}
601 | |
602 void | |
603 FeatureExtractionModelTransformer::setCompletion(int completion) | |
604 { | |
605 int binCount = 1; | |
606 if (m_descriptor->hasFixedBinCount) { | |
607 binCount = m_descriptor->binCount; | |
608 } | |
609 | |
610 // std::cerr << "FeatureExtractionModelTransformer::setCompletion(" | |
611 // << completion << ")" << std::endl; | |
612 | |
613 if (binCount == 0) { | |
614 | |
615 SparseOneDimensionalModel *model = | |
616 getConformingOutput<SparseOneDimensionalModel>(); | |
617 if (!model) return; | |
618 model->setCompletion(completion, true); //!!!m_context.updates); | |
619 | |
620 } else if (binCount == 1) { | |
621 | |
622 SparseTimeValueModel *model = | |
623 getConformingOutput<SparseTimeValueModel>(); | |
624 if (!model) return; | |
625 model->setCompletion(completion, true); //!!!m_context.updates); | |
626 | |
627 } else if (m_descriptor->sampleType == | |
628 Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | |
629 | |
630 NoteModel *model = | |
631 getConformingOutput<NoteModel>(); | |
632 if (!model) return; | |
633 model->setCompletion(completion, true); //!!!m_context.updates); | |
634 | |
635 } else { | |
636 | |
637 EditableDenseThreeDimensionalModel *model = | |
638 getConformingOutput<EditableDenseThreeDimensionalModel>(); | |
639 if (!model) return; | |
640 model->setCompletion(completion, true); //!!!m_context.updates); | |
641 } | |
642 } | |
643 |