comparison plugin/transform/FeatureExtractionModelTransformer.cpp @ 350:d7c41483af8f
* Merge from transforms branch -- switch over to using Transform object properly
| author | Chris Cannam | 
|---|---|
| date | Fri, 07 Dec 2007 16:47:31 +0000 | 
| parents | 277006c62fea | 
| children | 399ea254afd6 | 
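
This changeset replaces the transformer's old five-argument constructor (input model, plugin id, ExecutionContext, configuration XML, output name) with a two-argument form taking a ModelTransformer::Input and a Transform, and reads the plugin id, output, step/block size, window type and start time/duration from the Transform. A minimal caller-side sketch of the new form follows; the Transform setters and the Input(Model *, int) constructor are assumed counterparts of the getters used in the diff, and the plugin id shown is a hypothetical example, not one taken from this changeset.

```cpp
// Hypothetical usage sketch -- not part of this changeset.  Setter names
// (setPluginIdentifier, setOutput, setStepSize, setBlockSize) and the
// Input(Model *, int) constructor are assumed mirrors of the getters that
// appear in the new code below.  waveModel is some existing
// DenseTimeValueModel (e.g. a WaveFileModel) assumed to be in scope.
Transform transform;
transform.setPluginIdentifier("vamp:vamp-example-plugins:percussiononsets");
transform.setOutput("onsets");    // "" would select the plugin's first output
transform.setStepSize(512);
transform.setBlockSize(1024);

ModelTransformer::Input input(waveModel, -1);   // -1: mix all channels down

FeatureExtractionModelTransformer *transformer =
    new FeatureExtractionModelTransformer(input, transform);
```
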
| 348:edda24bb85fc (before) | 350:d7c41483af8f (after) | 
|---|---|
| 27 #include "data/model/DenseTimeValueModel.h" | 27 #include "data/model/DenseTimeValueModel.h" | 
| 28 #include "data/model/NoteModel.h" | 28 #include "data/model/NoteModel.h" | 
| 29 #include "data/model/FFTModel.h" | 29 #include "data/model/FFTModel.h" | 
| 30 #include "data/model/WaveFileModel.h" | 30 #include "data/model/WaveFileModel.h" | 
| 31 | 31 | 
| | 32 #include "TransformFactory.h" | 
| | 33 | 
| 32 #include <QMessageBox> | 34 #include <QMessageBox> | 
| 33 | 35 | 
| 34 #include <iostream> | 36 #include <iostream> | 
| 35 | 37 | 
| 36 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Model *inputModel, | 38 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in, | 
| 37 QString pluginId, | 39 const Transform &transform) : | 
| 38 const ExecutionContext &context, | 40 ModelTransformer(in, transform), | 
| 39 QString configurationXml, | |
| 40 QString outputName) : | |
| 41 PluginTransformer(inputModel, context), | |
| 42 m_plugin(0), | 41 m_plugin(0), | 
| 43 m_descriptor(0), | 42 m_descriptor(0), | 
| 44 m_outputFeatureNo(0) | 43 m_outputFeatureNo(0) | 
| 45 { | 44 { | 
| 46 // std::cerr << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId.toStdString() << ", outputName " << outputName.toStdString() << std::endl; | 45 // std::cerr << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId.toStdString() << ", outputName " << m_transform.getOutput().toStdString() << std::endl; | 
| | 46 | 
| | 47 QString pluginId = transform.getPluginIdentifier(); | 
| 47 | 48 | 
| 48 FeatureExtractionPluginFactory *factory = | 49 FeatureExtractionPluginFactory *factory = | 
| 49 FeatureExtractionPluginFactory::instanceFor(pluginId); | 50 FeatureExtractionPluginFactory::instanceFor(pluginId); | 
| 50 | 51 | 
| 51 if (!factory) { | 52 if (!factory) { | 
| 52 std::cerr << "FeatureExtractionModelTransformer: No factory available for plugin id \"" | 53 std::cerr << "FeatureExtractionModelTransformer: No factory available for plugin id \"" | 
| 53 << pluginId.toStdString() << "\"" << std::endl; | 54 << pluginId.toStdString() << "\"" << std::endl; | 
| 54 return; | 55 return; | 
| 55 } | 56 } | 
| 56 | 57 | 
| 57 m_plugin = factory->instantiatePlugin(pluginId, m_input->getSampleRate()); | 58 DenseTimeValueModel *input = getConformingInput(); | 
| 58 | 59 if (!input) { | 
| | 60 std::cerr << "FeatureExtractionModelTransformer: Input model not conformable" << std::endl; | 
| | 61 return; | 
| | 62 } | 
| | 63 | 
| | 64 m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate()); | 
| 59 if (!m_plugin) { | 65 if (!m_plugin) { | 
| 60 std::cerr << "FeatureExtractionModelTransformer: Failed to instantiate plugin \"" | 66 std::cerr << "FeatureExtractionModelTransformer: Failed to instantiate plugin \"" | 
| 61 << pluginId.toStdString() << "\"" << std::endl; | 67 << pluginId.toStdString() << "\"" << std::endl; | 
| 62 return; | 68 return; | 
| 63 } | 69 } | 
| 64 | 70 | 
| 65 m_context.makeConsistentWithPlugin(m_plugin); | 71 TransformFactory::getInstance()->makeContextConsistentWithPlugin | 
| 66 | 72 (m_transform, m_plugin); | 
| 67 if (configurationXml != "") { | 73 | 
| 68 PluginXml(m_plugin).setParametersFromXml(configurationXml); | 74 TransformFactory::getInstance()->setPluginParameters | 
| 69 } | 75 (m_transform, m_plugin); | 
| 70 | |
| 71 DenseTimeValueModel *input = getInput(); | |
| 72 if (!input) return; | |
| 73 | 76 | 
| 74 size_t channelCount = input->getChannelCount(); | 77 size_t channelCount = input->getChannelCount(); | 
| 75 if (m_plugin->getMaxChannelCount() < channelCount) { | 78 if (m_plugin->getMaxChannelCount() < channelCount) { | 
| 76 channelCount = 1; | 79 channelCount = 1; | 
| 77 } | 80 } | 
| 83 << input->getChannelCount() << ")" << std::endl; | 86 << input->getChannelCount() << ")" << std::endl; | 
| 84 return; | 87 return; | 
| 85 } | 88 } | 
| 86 | 89 | 
| 87 std::cerr << "Initialising feature extraction plugin with channels = " | 90 std::cerr << "Initialising feature extraction plugin with channels = " | 
| 88 << channelCount << ", step = " << m_context.stepSize | 91 << channelCount << ", step = " << m_transform.getStepSize() | 
| 89 << ", block = " << m_context.blockSize << std::endl; | 92 << ", block = " << m_transform.getBlockSize() << std::endl; | 
| 90 | 93 | 
| 91 if (!m_plugin->initialise(channelCount, | 94 if (!m_plugin->initialise(channelCount, | 
| 92 m_context.stepSize, | 95 m_transform.getStepSize(), | 
| 93 m_context.blockSize)) { | 96 m_transform.getBlockSize())) { | 
| 94 std::cerr << "FeatureExtractionModelTransformer: Plugin " | 97 std::cerr << "FeatureExtractionModelTransformer: Plugin " | 
| 95 << m_plugin->getIdentifier() << " failed to initialise!" << std::endl; | 98 << pluginId.toStdString() << " failed to initialise!" << std::endl; | 
| 96 return; | 99 return; | 
| 97 } | 100 } | 
| 98 | 101 | 
| 99 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors(); | 102 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors(); | 
| 100 | 103 | 
| 103 << pluginId.toStdString() << "\" has no outputs" << std::endl; | 106 << pluginId.toStdString() << "\" has no outputs" << std::endl; | 
| 104 return; | 107 return; | 
| 105 } | 108 } | 
| 106 | 109 | 
| 107 for (size_t i = 0; i < outputs.size(); ++i) { | 110 for (size_t i = 0; i < outputs.size(); ++i) { | 
| 108 if (outputName == "" || outputs[i].identifier == outputName.toStdString()) { | 111 if (m_transform.getOutput() == "" || | 
| | 112 outputs[i].identifier == m_transform.getOutput().toStdString()) { | 
| 109 m_outputFeatureNo = i; | 113 m_outputFeatureNo = i; | 
| 110 m_descriptor = new Vamp::Plugin::OutputDescriptor | 114 m_descriptor = new Vamp::Plugin::OutputDescriptor | 
| 111 (outputs[i]); | 115 (outputs[i]); | 
| 112 break; | 116 break; | 
| 113 } | 117 } | 
| 114 } | 118 } | 
| 115 | 119 | 
| 116 if (!m_descriptor) { | 120 if (!m_descriptor) { | 
| 117 std::cerr << "FeatureExtractionModelTransformer: Plugin \"" | 121 std::cerr << "FeatureExtractionModelTransformer: Plugin \"" | 
| 118 << pluginId.toStdString() << "\" has no output named \"" | 122 << pluginId.toStdString() << "\" has no output named \"" | 
| 119 << outputName.toStdString() << "\"" << std::endl; | 123 << m_transform.getOutput().toStdString() << "\"" << std::endl; | 
| 120 return; | 124 return; | 
| 121 } | 125 } | 
| 122 | 126 | 
| 123 // std::cerr << "FeatureExtractionModelTransformer: output sample type " | 127 // std::cerr << "FeatureExtractionModelTransformer: output sample type " | 
| 124 // << m_descriptor->sampleType << std::endl; | 128 // << m_descriptor->sampleType << std::endl; | 
| 138 minValue = m_descriptor->minValue; | 142 minValue = m_descriptor->minValue; | 
| 139 maxValue = m_descriptor->maxValue; | 143 maxValue = m_descriptor->maxValue; | 
| 140 haveExtents = true; | 144 haveExtents = true; | 
| 141 } | 145 } | 
| 142 | 146 | 
| 143 size_t modelRate = m_input->getSampleRate(); | 147 size_t modelRate = input->getSampleRate(); | 
| 144 size_t modelResolution = 1; | 148 size_t modelResolution = 1; | 
| 145 | 149 | 
| 146 switch (m_descriptor->sampleType) { | 150 switch (m_descriptor->sampleType) { | 
| 147 | 151 | 
| 148 case Vamp::Plugin::OutputDescriptor::VariableSampleRate: | 152 case Vamp::Plugin::OutputDescriptor::VariableSampleRate: | 
| 150 modelResolution = size_t(modelRate / m_descriptor->sampleRate + 0.001); | 154 modelResolution = size_t(modelRate / m_descriptor->sampleRate + 0.001); | 
| 151 } | 155 } | 
| 152 break; | 156 break; | 
| 153 | 157 | 
| 154 case Vamp::Plugin::OutputDescriptor::OneSamplePerStep: | 158 case Vamp::Plugin::OutputDescriptor::OneSamplePerStep: | 
| 155 modelResolution = m_context.stepSize; | 159 modelResolution = m_transform.getStepSize(); | 
| 156 break; | 160 break; | 
| 157 | 161 | 
| 158 case Vamp::Plugin::OutputDescriptor::FixedSampleRate: | 162 case Vamp::Plugin::OutputDescriptor::FixedSampleRate: | 
| 159 modelRate = size_t(m_descriptor->sampleRate + 0.001); | 163 modelRate = size_t(m_descriptor->sampleRate + 0.001); | 
| 160 break; | 164 break; | 
| 217 } | 221 } | 
| 218 | 222 | 
| 219 m_output = model; | 223 m_output = model; | 
| 220 } | 224 } | 
| 221 | 225 | 
| 222 if (m_output) m_output->setSourceModel(m_input); | 226 if (m_output) m_output->setSourceModel(input); | 
| 223 } | 227 } | 
| 224 | 228 | 
| 225 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer() | 229 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer() | 
| 226 { | 230 { | 
| 227 std::cerr << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << std::endl; | 231 std::cerr << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << std::endl; | 
| 228 delete m_plugin; | 232 delete m_plugin; | 
| 229 delete m_descriptor; | 233 delete m_descriptor; | 
| 230 } | 234 } | 
| 231 | 235 | 
| 232 DenseTimeValueModel * | 236 DenseTimeValueModel * | 
| 233 FeatureExtractionModelTransformer::getInput() | 237 FeatureExtractionModelTransformer::getConformingInput() | 
| 234 { | 238 { | 
| 235 DenseTimeValueModel *dtvm = | 239 DenseTimeValueModel *dtvm = | 
| 236 dynamic_cast<DenseTimeValueModel *>(getInputModel()); | 240 dynamic_cast<DenseTimeValueModel *>(getInputModel()); | 
| 237 if (!dtvm) { | 241 if (!dtvm) { | 
| 238 std::cerr << "FeatureExtractionModelTransformer::getInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl; | 242 std::cerr << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl; | 
| 239 } | 243 } | 
| 240 return dtvm; | 244 return dtvm; | 
| 241 } | 245 } | 
| 242 | 246 | 
| 243 void | 247 void | 
| 244 FeatureExtractionModelTransformer::run() | 248 FeatureExtractionModelTransformer::run() | 
| 245 { | 249 { | 
| 246 DenseTimeValueModel *input = getInput(); | 250 DenseTimeValueModel *input = getConformingInput(); | 
| 247 if (!input) return; | 251 if (!input) return; | 
| 248 | 252 | 
| 249 if (!m_output) return; | 253 if (!m_output) return; | 
| 250 | 254 | 
| 251 while (!input->isReady()) { | 255 while (!input->isReady()) { | 
| 258 */ | 262 */ | 
| 259 std::cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << std::endl; | 263 std::cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << std::endl; | 
| 260 sleep(1); | 264 sleep(1); | 
| 261 } | 265 } | 
| 262 | 266 | 
| 263 size_t sampleRate = m_input->getSampleRate(); | 267 size_t sampleRate = input->getSampleRate(); | 
| 264 | 268 | 
| 265 size_t channelCount = input->getChannelCount(); | 269 size_t channelCount = input->getChannelCount(); | 
| 266 if (m_plugin->getMaxChannelCount() < channelCount) { | 270 if (m_plugin->getMaxChannelCount() < channelCount) { | 
| 267 channelCount = 1; | 271 channelCount = 1; | 
| 268 } | 272 } | 
| 269 | 273 | 
| 270 float **buffers = new float*[channelCount]; | 274 float **buffers = new float*[channelCount]; | 
| 271 for (size_t ch = 0; ch < channelCount; ++ch) { | 275 for (size_t ch = 0; ch < channelCount; ++ch) { | 
| 272 buffers[ch] = new float[m_context.blockSize + 2]; | 276 buffers[ch] = new float[m_transform.getBlockSize() + 2]; | 
| 273 } | 277 } | 
| | 278 | 
| | 279 size_t stepSize = m_transform.getStepSize(); | 
| | 280 size_t blockSize = m_transform.getBlockSize(); | 
| 274 | 281 | 
| 275 bool frequencyDomain = (m_plugin->getInputDomain() == | 282 bool frequencyDomain = (m_plugin->getInputDomain() == | 
| 276 Vamp::Plugin::FrequencyDomain); | 283 Vamp::Plugin::FrequencyDomain); | 
| 277 std::vector<FFTModel *> fftModels; | 284 std::vector<FFTModel *> fftModels; | 
| 278 | 285 | 
| 279 if (frequencyDomain) { | 286 if (frequencyDomain) { | 
| 280 for (size_t ch = 0; ch < channelCount; ++ch) { | 287 for (size_t ch = 0; ch < channelCount; ++ch) { | 
| 281 FFTModel *model = new FFTModel | 288 FFTModel *model = new FFTModel | 
| 282 (getInput(), | 289 (getConformingInput(), | 
| 283 channelCount == 1 ? m_context.channel : ch, | 290 channelCount == 1 ? m_input.getChannel() : ch, | 
| 284 m_context.windowType, | 291 m_transform.getWindowType(), | 
| 285 m_context.blockSize, | 292 blockSize, | 
| 286 m_context.stepSize, | 293 stepSize, | 
| 287 m_context.blockSize, | 294 blockSize, | 
| 288 false, | 295 false, | 
| 289 StorageAdviser::PrecisionCritical); | 296 StorageAdviser::PrecisionCritical); | 
| 290 if (!model->isOK()) { | 297 if (!model->isOK()) { | 
| 291 QMessageBox::critical | 298 QMessageBox::critical | 
| 292 (0, tr("FFT cache failed"), | 299 (0, tr("FFT cache failed"), | 
| 299 model->resume(); | 306 model->resume(); | 
| 300 fftModels.push_back(model); | 307 fftModels.push_back(model); | 
| 301 } | 308 } | 
| 302 } | 309 } | 
| 303 | 310 | 
| 304 long startFrame = m_input->getStartFrame(); | 311 long startFrame = m_input.getModel()->getStartFrame(); | 
| 305 long endFrame = m_input->getEndFrame(); | 312 long endFrame = m_input.getModel()->getEndFrame(); | 
| 306 | 313 | 
| 307 long contextStart = m_context.startFrame; | 314 RealTime contextStartRT = m_transform.getStartTime(); | 
| 308 long contextDuration = m_context.duration; | 315 RealTime contextDurationRT = m_transform.getDuration(); | 
| | 316 | 
| | 317 long contextStart = | 
| | 318 RealTime::realTime2Frame(contextStartRT, sampleRate); | 
| | 319 | 
| | 320 long contextDuration = | 
| | 321 RealTime::realTime2Frame(contextDurationRT, sampleRate); | 
| 309 | 322 | 
| 310 if (contextStart == 0 || contextStart < startFrame) { | 323 if (contextStart == 0 || contextStart < startFrame) { | 
| 311 contextStart = startFrame; | 324 contextStart = startFrame; | 
| 312 } | 325 } | 
| 313 | 326 | 
| 325 setCompletion(0); | 338 setCompletion(0); | 
| 326 | 339 | 
| 327 while (!m_abandoned) { | 340 while (!m_abandoned) { | 
| 328 | 341 | 
| 329 if (frequencyDomain) { | 342 if (frequencyDomain) { | 
| 330 if (blockFrame - int(m_context.blockSize)/2 > | 343 if (blockFrame - int(blockSize)/2 > | 
| 331 contextStart + contextDuration) break; | 344 contextStart + contextDuration) break; | 
| 332 } else { | 345 } else { | 
| 333 if (blockFrame >= | 346 if (blockFrame >= | 
| 334 contextStart + contextDuration) break; | 347 contextStart + contextDuration) break; | 
| 335 } | 348 } | 
| 336 | 349 | 
| 337 // std::cerr << "FeatureExtractionModelTransformer::run: blockFrame " | 350 // std::cerr << "FeatureExtractionModelTransformer::run: blockFrame " | 
| 338 // << blockFrame << ", endFrame " << endFrame << ", blockSize " | 351 // << blockFrame << ", endFrame " << endFrame << ", blockSize " | 
| 339 // << m_context.blockSize << std::endl; | 352 // << blockSize << std::endl; | 
| 340 | 353 | 
| 341 long completion = | 354 long completion = | 
| 342 (((blockFrame - contextStart) / m_context.stepSize) * 99) / | 355 (((blockFrame - contextStart) / stepSize) * 99) / | 
| 343 (contextDuration / m_context.stepSize); | 356 (contextDuration / stepSize); | 
| 344 | 357 | 
| 345 // channelCount is either m_input->channelCount or 1 | 358 // channelCount is either m_input.getModel()->channelCount or 1 | 
| 346 | 359 | 
| 347 for (size_t ch = 0; ch < channelCount; ++ch) { | 360 for (size_t ch = 0; ch < channelCount; ++ch) { | 
| 348 if (frequencyDomain) { | 361 if (frequencyDomain) { | 
| 349 int column = (blockFrame - startFrame) / m_context.stepSize; | 362 int column = (blockFrame - startFrame) / stepSize; | 
| 350 for (size_t i = 0; i <= m_context.blockSize/2; ++i) { | 363 for (size_t i = 0; i <= blockSize/2; ++i) { | 
| 351 fftModels[ch]->getValuesAt | 364 fftModels[ch]->getValuesAt | 
| 352 (column, i, buffers[ch][i*2], buffers[ch][i*2+1]); | 365 (column, i, buffers[ch][i*2], buffers[ch][i*2+1]); | 
| 353 } | 366 } | 
| 354 } else { | 367 } else { | 
| 355 getFrames(ch, channelCount, | 368 getFrames(ch, channelCount, | 
| 356 blockFrame, m_context.blockSize, buffers[ch]); | 369 blockFrame, blockSize, buffers[ch]); | 
| 357 } | 370 } | 
| 358 } | 371 } | 
| 359 | 372 | 
| 360 Vamp::Plugin::FeatureSet features = m_plugin->process | 373 Vamp::Plugin::FeatureSet features = m_plugin->process | 
| 361 (buffers, Vamp::RealTime::frame2RealTime(blockFrame, sampleRate)); | 374 (buffers, Vamp::RealTime::frame2RealTime(blockFrame, sampleRate)); | 
| 369 if (blockFrame == contextStart || completion > prevCompletion) { | 382 if (blockFrame == contextStart || completion > prevCompletion) { | 
| 370 setCompletion(completion); | 383 setCompletion(completion); | 
| 371 prevCompletion = completion; | 384 prevCompletion = completion; | 
| 372 } | 385 } | 
| 373 | 386 | 
| 374 blockFrame += m_context.stepSize; | 387 blockFrame += stepSize; | 
| 375 } | 388 } | 
| 376 | 389 | 
| 377 if (m_abandoned) return; | 390 if (m_abandoned) return; | 
| 378 | 391 | 
| 379 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures(); | 392 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures(); | 
| 408 size -= offset; | 421 size -= offset; | 
| 409 if (size <= 0) return; | 422 if (size <= 0) return; | 
| 410 startFrame = 0; | 423 startFrame = 0; | 
| 411 } | 424 } | 
| 412 | 425 | 
| 413 long got = getInput()->getData | 426 DenseTimeValueModel *input = getConformingInput(); | 
| 414 ((channelCount == 1 ? m_context.channel : channel), | 427 if (!input) return; | 
| | 428 | 
| | 429 long got = input->getData | 
| | 430 ((channelCount == 1 ? m_input.getChannel() : channel), | 
| 415 startFrame, size, buffer + offset); | 431 startFrame, size, buffer + offset); | 
| 416 | 432 | 
| 417 while (got < size) { | 433 while (got < size) { | 
| 418 buffer[offset + got] = 0.0; | 434 buffer[offset + got] = 0.0; | 
| 419 ++got; | 435 ++got; | 
| 420 } | 436 } | 
| 421 | 437 | 
| 422 if (m_context.channel == -1 && channelCount == 1 && | 438 if (m_input.getChannel() == -1 && channelCount == 1 && | 
| 423 getInput()->getChannelCount() > 1) { | 439 input->getChannelCount() > 1) { | 
| 424 // use mean instead of sum, as plugin input | 440 // use mean instead of sum, as plugin input | 
| 425 int cc = getInput()->getChannelCount(); | 441 int cc = input->getChannelCount(); | 
| 426 for (long i = 0; i < size; ++i) { | 442 for (long i = 0; i < size; ++i) { | 
| 427 buffer[i] /= cc; | 443 buffer[i] /= cc; | 
| 428 } | 444 } | 
| 429 } | 445 } | 
| 430 } | 446 } | 
| 431 | 447 | 
| 432 void | 448 void | 
| 433 FeatureExtractionModelTransformer::addFeature(size_t blockFrame, | 449 FeatureExtractionModelTransformer::addFeature(size_t blockFrame, | 
| 434 const Vamp::Plugin::Feature &feature) | 450 const Vamp::Plugin::Feature &feature) | 
| 435 { | 451 { | 
| 436 size_t inputRate = m_input->getSampleRate(); | 452 size_t inputRate = m_input.getModel()->getSampleRate(); | 
| 437 | 453 | 
| 438 // std::cerr << "FeatureExtractionModelTransformer::addFeature(" | 454 // std::cerr << "FeatureExtractionModelTransformer::addFeature(" | 
| 439 // << blockFrame << ")" << std::endl; | 455 // << blockFrame << ")" << std::endl; | 
| 440 | 456 | 
| 441 int binCount = 1; | 457 int binCount = 1; | 
| 470 } | 486 } | 
| 471 } | 487 } | 
| 472 | 488 | 
| 473 if (binCount == 0) { | 489 if (binCount == 0) { | 
| 474 | 490 | 
| 475 SparseOneDimensionalModel *model = getOutput<SparseOneDimensionalModel>(); | 491 SparseOneDimensionalModel *model = | 
| 476 if (!model) return; | 492 getConformingOutput<SparseOneDimensionalModel>(); | 
| | 493 if (!model) return; | 
| | 494 | 
| 477 model->addPoint(SparseOneDimensionalModel::Point(frame, feature.label.c_str())); | 495 model->addPoint(SparseOneDimensionalModel::Point(frame, feature.label.c_str())); | 
| 478 | 496 | 
| 479 } else if (binCount == 1) { | 497 } else if (binCount == 1) { | 
| 480 | 498 | 
| 481 float value = 0.0; | 499 float value = 0.0; | 
| 482 if (feature.values.size() > 0) value = feature.values[0]; | 500 if (feature.values.size() > 0) value = feature.values[0]; | 
| 483 | 501 | 
| 484 SparseTimeValueModel *model = getOutput<SparseTimeValueModel>(); | 502 SparseTimeValueModel *model = | 
| 485 if (!model) return; | 503 getConformingOutput<SparseTimeValueModel>(); | 
| | 504 if (!model) return; | 
| | 505 | 
| 486 model->addPoint(SparseTimeValueModel::Point(frame, value, feature.label.c_str())); | 506 model->addPoint(SparseTimeValueModel::Point(frame, value, feature.label.c_str())); | 
| 487 // std::cerr << "SparseTimeValueModel::addPoint(" << frame << ", " << value << "), " << feature.label.c_str() << std::endl; | 507 // std::cerr << "SparseTimeValueModel::addPoint(" << frame << ", " << value << "), " << feature.label.c_str() << std::endl; | 
| 488 | 508 | 
| 489 } else if (m_descriptor->sampleType == | 509 } else if (m_descriptor->sampleType == | 
| 490 Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | 510 Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | 
| 498 float velocity = 100; | 518 float velocity = 100; | 
| 499 if (feature.values.size() > 2) velocity = feature.values[2]; | 519 if (feature.values.size() > 2) velocity = feature.values[2]; | 
| 500 if (velocity < 0) velocity = 127; | 520 if (velocity < 0) velocity = 127; | 
| 501 if (velocity > 127) velocity = 127; | 521 if (velocity > 127) velocity = 127; | 
| 502 | 522 | 
| 503 NoteModel *model = getOutput<NoteModel>(); | 523 NoteModel *model = getConformingOutput<NoteModel>(); | 
| 504 if (!model) return; | 524 if (!model) return; | 
| 505 | 525 | 
| 506 model->addPoint(NoteModel::Point(frame, pitch, | 526 model->addPoint(NoteModel::Point(frame, pitch, | 
| 507 lrintf(duration), | 527 lrintf(duration), | 
| 508 velocity / 127.f, | 528 velocity / 127.f, | 
| 511 } else { | 531 } else { | 
| 512 | 532 | 
| 513 DenseThreeDimensionalModel::Column values = feature.values; | 533 DenseThreeDimensionalModel::Column values = feature.values; | 
| 514 | 534 | 
| 515 EditableDenseThreeDimensionalModel *model = | 535 EditableDenseThreeDimensionalModel *model = | 
| 516 getOutput<EditableDenseThreeDimensionalModel>(); | 536 getConformingOutput<EditableDenseThreeDimensionalModel>(); | 
| 517 if (!model) return; | 537 if (!model) return; | 
| 518 | 538 | 
| 519 model->setColumn(frame / model->getResolution(), values); | 539 model->setColumn(frame / model->getResolution(), values); | 
| 520 } | 540 } | 
| 521 } | 541 } | 
| 531 // std::cerr << "FeatureExtractionModelTransformer::setCompletion(" | 551 // std::cerr << "FeatureExtractionModelTransformer::setCompletion(" | 
| 532 // << completion << ")" << std::endl; | 552 // << completion << ")" << std::endl; | 
| 533 | 553 | 
| 534 if (binCount == 0) { | 554 if (binCount == 0) { | 
| 535 | 555 | 
| 536 SparseOneDimensionalModel *model = getOutput<SparseOneDimensionalModel>(); | 556 SparseOneDimensionalModel *model = | 
| 537 if (!model) return; | 557 getConformingOutput<SparseOneDimensionalModel>(); | 
| 538 model->setCompletion(completion, m_context.updates); | 558 if (!model) return; | 
| | 559 model->setCompletion(completion, true); //!!!m_context.updates); | 
| 539 | 560 | 
| 540 } else if (binCount == 1) { | 561 } else if (binCount == 1) { | 
| 541 | 562 | 
| 542 SparseTimeValueModel *model = getOutput<SparseTimeValueModel>(); | 563 SparseTimeValueModel *model = | 
| 543 if (!model) return; | 564 getConformingOutput<SparseTimeValueModel>(); | 
| 544 model->setCompletion(completion, m_context.updates); | 565 if (!model) return; | 
| | 566 model->setCompletion(completion, true); //!!!m_context.updates); | 
| 545 | 567 | 
| 546 } else if (m_descriptor->sampleType == | 568 } else if (m_descriptor->sampleType == | 
| 547 Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | 569 Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | 
| 548 | 570 | 
| 549 NoteModel *model = getOutput<NoteModel>(); | 571 NoteModel *model = | 
| 550 if (!model) return; | 572 getConformingOutput<NoteModel>(); | 
| 551 model->setCompletion(completion, m_context.updates); | 573 if (!model) return; | 
| | 574 model->setCompletion(completion, true); //!!!m_context.updates); | 
| 552 | 575 | 
| 553 } else { | 576 } else { | 
| 554 | 577 | 
| 555 EditableDenseThreeDimensionalModel *model = | 578 EditableDenseThreeDimensionalModel *model = | 
| 556 getOutput<EditableDenseThreeDimensionalModel>(); | 579 getConformingOutput<EditableDenseThreeDimensionalModel>(); | 
| 557 if (!model) return; | 580 if (!model) return; | 
| 558 model->setCompletion(completion, m_context.updates); | 581 model->setCompletion(completion, true); //!!!m_context.updates); | 
| 559 } | 582 } | 
| 560 } | 583 } | 
| 561 | 584 | 
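
Read top to bottom, the new-revision column of the constructor reduces to the set-up sequence sketched below. This is a condensed sketch for orientation, not verbatim source: early returns on failure, the output-descriptor scan and the output-model creation are omitted, and Vamp::Plugin* is an assumed declared type for m_plugin, inferred from the Vamp calls made on it later in the diff.

```cpp
// Condensed sketch of the new initialisation path, using only identifiers
// that appear in the 350:d7c41483af8f column above.
QString pluginId = transform.getPluginIdentifier();

FeatureExtractionPluginFactory *factory =
    FeatureExtractionPluginFactory::instanceFor(pluginId);

DenseTimeValueModel *input = getConformingInput();

// Vamp::Plugin* is an assumption; the member's declaration is not in this hunk.
Vamp::Plugin *plugin =
    factory->instantiatePlugin(pluginId, input->getSampleRate());

// The Transform now carries what ExecutionContext used to hold: step and
// block size, window type, start time and duration.  TransformFactory
// reconciles those values with the plugin and applies its parameters.
TransformFactory::getInstance()->makeContextConsistentWithPlugin(transform, plugin);
TransformFactory::getInstance()->setPluginParameters(transform, plugin);

size_t channelCount = input->getChannelCount();
if (plugin->getMaxChannelCount() < channelCount) channelCount = 1;

plugin->initialise(channelCount,
                   transform.getStepSize(),
                   transform.getBlockSize());
```
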
