annotate transform/FeatureExtractionModelTransformer.cpp @ 1211:5a1198083d9a piper

Pull out model creation into the transformer thread run(), so that all communications with the plugin server happen on a single thread. Then make the model accessor wait for them to be created (which still happens right at the start of processing) before returning.
author Chris Cannam
date Mon, 17 Oct 2016 14:18:23 +0100
parents aa588c391d1a
children ba16388b937d
rev   line source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "FeatureExtractionModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/FeatureExtractionPluginFactory.h"
Chris@320 19 #include "plugin/PluginXml.h"
Chris@475 20 #include <vamp-hostsdk/Plugin.h>
Chris@320 21
Chris@320 22 #include "data/model/Model.h"
Chris@320 23 #include "base/Window.h"
Chris@387 24 #include "base/Exceptions.h"
Chris@320 25 #include "data/model/SparseOneDimensionalModel.h"
Chris@320 26 #include "data/model/SparseTimeValueModel.h"
Chris@320 27 #include "data/model/EditableDenseThreeDimensionalModel.h"
Chris@320 28 #include "data/model/DenseTimeValueModel.h"
Chris@320 29 #include "data/model/NoteModel.h"
gyorgyf@786 30 #include "data/model/FlexiNoteModel.h"
Chris@441 31 #include "data/model/RegionModel.h"
Chris@320 32 #include "data/model/FFTModel.h"
Chris@320 33 #include "data/model/WaveFileModel.h"
Chris@558 34 #include "rdf/PluginRDFDescription.h"
Chris@320 35
Chris@350 36 #include "TransformFactory.h"
Chris@350 37
Chris@320 38 #include <iostream>
Chris@320 39
Chris@859 40 #include <QSettings>
Chris@859 41
Chris@350 42 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 43 const Transform &transform) :
Chris@350 44 ModelTransformer(in, transform),
Chris@1211 45 m_plugin(0),
Chris@1211 46 m_haveOutputs(false)
Chris@320 47 {
Chris@1080 48 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl;
Chris@350 49
Chris@1211 50 // initialise();
Chris@849 51 }
Chris@849 52
Chris@849 53 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 54 const Transforms &transforms) :
Chris@849 55 ModelTransformer(in, transforms),
Chris@1211 56 m_plugin(0),
Chris@1211 57 m_haveOutputs(false)
Chris@849 58 {
Chris@1080 59 if (m_transforms.empty()) {
Chris@1080 60 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s)" << endl;
Chris@1080 61 } else {
Chris@1080 62 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s), first has plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl;
Chris@1080 63 }
Chris@1080 64
Chris@1211 65 // initialise();
Chris@849 66 }
Chris@849 67
Chris@849 68 static bool
Chris@849 69 areTransformsSimilar(const Transform &t1, const Transform &t2)
Chris@849 70 {
Chris@849 71 Transform t2o(t2);
Chris@849 72 t2o.setOutput(t1.getOutput());
Chris@849 73 return t1 == t2o;
Chris@849 74 }
Chris@849 75
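For illustration, a minimal sketch (with a hypothetical plugin id) of the "similar" relation this helper defines: two transforms count as similar when substituting one's output into the other makes them compare equal, i.e. they may differ only in which plugin output they select.

    Transform a;
    a.setPluginIdentifier("vamp:example-plugins:someplugin");  // hypothetical id
    a.setOutput("firstoutput");
    Transform b(a);
    b.setOutput("secondoutput");       // differs only in choice of output
    // areTransformsSimilar(a, b) == true; change any other field and it is false
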
Chris@849 76 bool
Chris@849 77 FeatureExtractionModelTransformer::initialise()
Chris@849 78 {
Chris@849 79 // All transforms must use the same plugin, parameters, and
Chris@849 80 // inputs: they can differ only in choice of plugin output. So we
Chris@849 81 // initialise based purely on the first transform in the list (but
Chris@849 82 // first check that they are actually similar as promised)
Chris@849 83
Chris@849 84 for (int j = 1; j < (int)m_transforms.size(); ++j) {
Chris@849 85 if (!areTransformsSimilar(m_transforms[0], m_transforms[j])) {
Chris@849 86 m_message = tr("Transforms supplied to a single FeatureExtractionModelTransformer instance must be similar in every respect except plugin output");
Chris@849 87 return false;
Chris@849 88 }
Chris@849 89 }
Chris@849 90
Chris@849 91 Transform primaryTransform = m_transforms[0];
Chris@849 92
Chris@849 93 QString pluginId = primaryTransform.getPluginIdentifier();
Chris@320 94
Chris@320 95 FeatureExtractionPluginFactory *factory =
Chris@320 96 FeatureExtractionPluginFactory::instanceFor(pluginId);
Chris@320 97
Chris@320 98 if (!factory) {
Chris@361 99 m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId);
Chris@849 100 return false;
Chris@320 101 }
Chris@320 102
Chris@350 103 DenseTimeValueModel *input = getConformingInput();
Chris@350 104 if (!input) {
Chris@361 105 m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId);
Chris@849 106 return false;
Chris@350 107 }
Chris@320 108
Chris@1211 109 cerr << "instantiating plugin for transform in thread "
Chris@1211 110 << QThread::currentThreadId() << endl;
Chris@1211 111
Chris@1040 112 m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate());
Chris@320 113 if (!m_plugin) {
Chris@361 114 m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId);
Chris@849 115 return false;
Chris@320 116 }
Chris@320 117
Chris@350 118 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 119 (primaryTransform, m_plugin);
Chris@343 120
Chris@350 121 TransformFactory::getInstance()->setPluginParameters
Chris@849 122 (primaryTransform, m_plugin);
Chris@320 123
Chris@930 124 int channelCount = input->getChannelCount();
Chris@930 125 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 126 channelCount = 1;
Chris@320 127 }
Chris@930 128 if ((int)m_plugin->getMinChannelCount() > channelCount) {
Chris@361 129 m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)")
Chris@361 130 .arg(pluginId)
Chris@361 131 .arg(m_plugin->getMinChannelCount())
Chris@361 132 .arg(m_plugin->getMaxChannelCount())
Chris@361 133 .arg(input->getChannelCount());
Chris@849 134 return false;
Chris@320 135 }
Chris@320 136
Chris@690 137 SVDEBUG << "Initialising feature extraction plugin with channels = "
Chris@849 138 << channelCount << ", step = " << primaryTransform.getStepSize()
Chris@849 139 << ", block = " << primaryTransform.getBlockSize() << endl;
Chris@320 140
Chris@320 141 if (!m_plugin->initialise(channelCount,
Chris@849 142 primaryTransform.getStepSize(),
Chris@849 143 primaryTransform.getBlockSize())) {
Chris@361 144
Chris@930 145 int pstep = primaryTransform.getStepSize();
Chris@930 146 int pblock = primaryTransform.getBlockSize();
Chris@361 147
Chris@850 148 ///!!! hang on, this isn't right -- we're modifying a copy
Chris@849 149 primaryTransform.setStepSize(0);
Chris@849 150 primaryTransform.setBlockSize(0);
Chris@361 151 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 152 (primaryTransform, m_plugin);
Chris@361 153
Chris@849 154 if (primaryTransform.getStepSize() != pstep ||
Chris@849 155 primaryTransform.getBlockSize() != pblock) {
Chris@361 156
Chris@361 157 if (!m_plugin->initialise(channelCount,
Chris@849 158 primaryTransform.getStepSize(),
Chris@849 159 primaryTransform.getBlockSize())) {
Chris@361 160
Chris@361 161 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 162 return false;
Chris@361 163
Chris@361 164 } else {
Chris@361 165
Chris@361 166 m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead")
Chris@361 167 .arg(pluginId)
Chris@361 168 .arg(pstep)
Chris@361 169 .arg(pblock)
Chris@849 170 .arg(primaryTransform.getStepSize())
Chris@849 171 .arg(primaryTransform.getBlockSize());
Chris@361 172 }
Chris@361 173
Chris@361 174 } else {
Chris@361 175
Chris@361 176 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 177 return false;
Chris@361 178 }
Chris@320 179 }
Chris@320 180
Chris@849 181 if (primaryTransform.getPluginVersion() != "") {
Chris@366 182 QString pv = QString("%1").arg(m_plugin->getPluginVersion());
Chris@849 183 if (pv != primaryTransform.getPluginVersion()) {
Chris@366 184 QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3")
Chris@849 185 .arg(primaryTransform.getPluginVersion())
Chris@366 186 .arg(pluginId)
Chris@366 187 .arg(pv);
Chris@366 188 if (m_message != "") {
Chris@366 189 m_message = QString("%1; %2").arg(vm).arg(m_message);
Chris@366 190 } else {
Chris@366 191 m_message = vm;
Chris@366 192 }
Chris@366 193 }
Chris@366 194 }
Chris@366 195
Chris@320 196 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@320 197
Chris@320 198 if (outputs.empty()) {
Chris@361 199 m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId);
Chris@849 200 return false;
Chris@320 201 }
Chris@320 202
Chris@849 203 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@849 204
Chris@849 205 for (int i = 0; i < (int)outputs.size(); ++i) {
Chris@849 206 // SVDEBUG << "comparing output " << i << " name \"" << outputs[i].identifier << "\" with expected \"" << m_transforms[j].getOutput() << "\"" << endl;
Chris@849 207 if (m_transforms[j].getOutput() == "" ||
Chris@849 208 outputs[i].identifier == m_transforms[j].getOutput().toStdString()) {
Chris@849 209 m_outputNos.push_back(i);
Chris@849 210 m_descriptors.push_back(new Vamp::Plugin::OutputDescriptor(outputs[i]));
Chris@849 211 m_fixedRateFeatureNos.push_back(-1); // we increment before use
Chris@849 212 break;
Chris@849 213 }
Chris@849 214 }
Chris@849 215
Chris@930 216 if ((int)m_descriptors.size() <= j) {
Chris@849 217 m_message = tr("Plugin \"%1\" has no output named \"%2\"")
Chris@849 218 .arg(pluginId)
Chris@849 219 .arg(m_transforms[j].getOutput());
Chris@849 220 return false;
Chris@849 221 }
Chris@320 222 }
Chris@320 223
Chris@849 224 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@876 225 createOutputModels(j);
Chris@849 226 }
Chris@849 227
Chris@1211 228 m_outputMutex.lock();
Chris@1211 229 m_haveOutputs = true;
Chris@1211 230 m_outputsCondition.wakeAll();
Chris@1211 231 m_outputMutex.unlock();
Chris@1211 232
Chris@849 233 return true;
Chris@558 234 }
Chris@558 235
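A concrete illustration (hypothetical sizes) of the step/block renegotiation in initialise() above: if the plugin rejects the requested sizes, they are zeroed, refilled from the plugin's preferred values via makeContextConsistentWithPlugin(), and initialisation is retried.

    // hypothetical renegotiation trace:
    //   requested: step 512, block 1024 -> m_plugin->initialise(...) fails
    //   reset to 0/0, makeContextConsistentWithPlugin() -> step 1024, block 2048
    //   retry m_plugin->initialise(channelCount, 1024, 2048) -> succeeds
    //   m_message: "... rejected the given step and block sizes (512 and 1024);
    //               using plugin defaults (1024 and 2048) instead"
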
Chris@558 236 void
Chris@876 237 FeatureExtractionModelTransformer::createOutputModels(int n)
Chris@558 238 {
Chris@558 239 DenseTimeValueModel *input = getConformingInput();
Chris@558 240
Chris@843 241 // cerr << "FeatureExtractionModelTransformer::createOutputModels: sample type " << m_descriptors[n]->sampleType << ", rate " << m_descriptors[n]->sampleRate << endl;
Chris@712 242
Chris@849 243 PluginRDFDescription description(m_transforms[n].getPluginIdentifier());
Chris@849 244 QString outputId = m_transforms[n].getOutput();
Chris@558 245
Chris@320 246 int binCount = 1;
Chris@320 247 float minValue = 0.0, maxValue = 0.0;
Chris@320 248 bool haveExtents = false;
Chris@876 249 bool haveBinCount = m_descriptors[n]->hasFixedBinCount;
Chris@876 250
Chris@876 251 if (haveBinCount) {
Chris@1039 252 binCount = (int)m_descriptors[n]->binCount;
Chris@320 253 }
Chris@320 254
Chris@876 255 m_needAdditionalModels[n] = false;
Chris@876 256
Chris@843 257 // cerr << "FeatureExtractionModelTransformer: output bin count "
Chris@843 258 // << binCount << endl;
Chris@320 259
Chris@849 260 if (binCount > 0 && m_descriptors[n]->hasKnownExtents) {
Chris@849 261 minValue = m_descriptors[n]->minValue;
Chris@849 262 maxValue = m_descriptors[n]->maxValue;
Chris@320 263 haveExtents = true;
Chris@320 264 }
Chris@320 265
Chris@1040 266 sv_samplerate_t modelRate = input->getSampleRate();
Chris@930 267 int modelResolution = 1;
Chris@712 268
Chris@849 269 if (m_descriptors[n]->sampleType !=
Chris@785 270 Vamp::Plugin::OutputDescriptor::OneSamplePerStep) {
Chris@849 271 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@843 272 cerr << "WARNING: plugin reports output sample rate as "
Chris@849 273 << m_descriptors[n]->sampleRate << " (can't display features with finer resolution than the input rate of " << input->getSampleRate() << ")" << endl;
Chris@785 274 }
Chris@785 275 }
Chris@785 276
Chris@849 277 switch (m_descriptors[n]->sampleType) {
Chris@320 278
Chris@320 279 case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
Chris@849 280 if (m_descriptors[n]->sampleRate != 0.0) {
Chris@1040 281 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@320 282 }
Chris@320 283 break;
Chris@320 284
Chris@320 285 case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
Chris@849 286 modelResolution = m_transforms[n].getStepSize();
Chris@320 287 break;
Chris@320 288
Chris@320 289 case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
Chris@451 290 //!!! SV doesn't actually support display of models that have
Chris@451 291 //!!! different underlying rates together -- so we always set
Chris@451 292 //!!! the model rate to be the input model's rate, and adjust
Chris@451 293 //!!! the resolution appropriately. We can't properly display
Chris@451 294 //!!! data with a higher resolution than the base model at all
Chris@849 295 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@451 296 modelResolution = 1;
Chris@1071 297 } else if (m_descriptors[n]->sampleRate <= 0.0) {
Chris@1071 298 cerr << "WARNING: Fixed sample-rate plugin reports invalid sample rate " << m_descriptors[n]->sampleRate << "; defaulting to input rate of " << input->getSampleRate() << endl;
Chris@1071 299 modelResolution = 1;
Chris@451 300 } else {
Chris@1040 301 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@451 302 }
Chris@320 303 break;
Chris@320 304 }
Chris@320 305
Chris@441 306 bool preDurationPlugin = (m_plugin->getVampApiVersion() < 2);
Chris@441 307
Chris@849 308 Model *out = 0;
Chris@849 309
Chris@441 310 if (binCount == 0 &&
Chris@849 311 (preDurationPlugin || !m_descriptors[n]->hasDuration)) {
Chris@320 312
Chris@445 313 // Anything with no value and no duration is an instant
Chris@445 314
Chris@849 315 out = new SparseOneDimensionalModel(modelRate, modelResolution, false);
Chris@558 316 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 317 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 318
Chris@441 319 } else if ((preDurationPlugin && binCount > 1 &&
Chris@849 320 (m_descriptors[n]->sampleType ==
Chris@441 321 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) ||
Chris@849 322 (!preDurationPlugin && m_descriptors[n]->hasDuration)) {
Chris@441 323
Chris@441 324 // For plugins using the old v1 API without explicit duration,
Chris@441 325 // we treat anything that has multiple bins (i.e. that has the
Chris@441 326 // potential to have value and duration) and a variable sample
Chris@441 327 // rate as a note model, taking its values as pitch, duration
Chris@441 328 // and velocity (if present) respectively. This is the same
Chris@441 329 // behaviour as always applied by SV to these plugins in the
Chris@441 330 // past.
Chris@441 331
Chris@441 332 // For plugins with the newer API, we treat anything with
Chris@441 333 // duration as either a note model with pitch and velocity, or
Chris@441 334 // a region model.
Chris@441 335
Chris@441 336 // How do we know whether it's an interval or note model?
Chris@441 337 // What's the essential difference? Is a note model any
Chris@441 338 // interval model using a Hz or "MIDI pitch" scale? There
Chris@441 339 // isn't really a reliable test for "MIDI pitch"... Does a
Chris@441 340 // note model always have velocity? This is a good question
Chris@441 341 // to be addressed by accompanying RDF, but for the moment we
Chris@441 342 // will do the following...
Chris@441 343
Chris@441 344 bool isNoteModel = false;
Chris@441 345
Chris@441 346 // Regions have only value (and duration -- we can't extract a
Chris@441 347 // region model from an old-style plugin that doesn't support
Chris@441 348 // duration)
Chris@441 349 if (binCount > 1) isNoteModel = true;
Chris@441 350
Chris@595 351 // Regions do not have units of Hz or MIDI things (a sweeping
Chris@595 352 // assumption!)
Chris@849 353 if (m_descriptors[n]->unit == "Hz" ||
Chris@849 354 m_descriptors[n]->unit.find("MIDI") != std::string::npos ||
Chris@849 355 m_descriptors[n]->unit.find("midi") != std::string::npos) {
Chris@595 356 isNoteModel = true;
Chris@595 357 }
Chris@441 358
Chris@441 359 // If we had a "sparse 3D model", we would have the additional
Chris@441 360 // problem of determining whether to use that here (if bin
Chris@441 361 // count > 1). But we don't.
Chris@441 362
Chris@859 363 QSettings settings;
Chris@859 364 settings.beginGroup("Transformer");
Chris@859 365 bool flexi = settings.value("use-flexi-note-model", false).toBool();
Chris@859 366 settings.endGroup();
Chris@859 367
Chris@859 368 cerr << "flexi = " << flexi << endl;
Chris@859 369
Chris@859 370 if (isNoteModel && !flexi) {
Chris@441 371
Chris@441 372 NoteModel *model;
Chris@441 373 if (haveExtents) {
Chris@859 374 model = new NoteModel
Chris@859 375 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 376 } else {
Chris@859 377 model = new NoteModel
Chris@859 378 (modelRate, modelResolution, false);
gyorgyf@786 379 }
Chris@849 380 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 381 out = model;
gyorgyf@786 382
Chris@859 383 } else if (isNoteModel && flexi) {
gyorgyf@786 384
gyorgyf@786 385 FlexiNoteModel *model;
gyorgyf@786 386 if (haveExtents) {
Chris@859 387 model = new FlexiNoteModel
Chris@859 388 (modelRate, modelResolution, minValue, maxValue, false);
gyorgyf@786 389 } else {
Chris@859 390 model = new FlexiNoteModel
Chris@859 391 (modelRate, modelResolution, false);
Chris@441 392 }
Chris@849 393 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 394 out = model;
Chris@441 395
Chris@441 396 } else {
Chris@441 397
Chris@441 398 RegionModel *model;
Chris@441 399 if (haveExtents) {
Chris@441 400 model = new RegionModel
Chris@441 401 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 402 } else {
Chris@441 403 model = new RegionModel
Chris@441 404 (modelRate, modelResolution, false);
Chris@441 405 }
Chris@849 406 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 407 out = model;
Chris@441 408 }
Chris@441 409
Chris@558 410 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 411 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 412
Chris@876 413 } else if (binCount == 1 ||
Chris@849 414 (m_descriptors[n]->sampleType ==
Chris@441 415 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) {
Chris@441 416
Chris@441 417 // Anything that is not a 1D, note, or interval model and that
Chris@441 418 // has only one value per result must be a sparse time value
Chris@441 419 // model.
Chris@441 420
Chris@441 421 // Anything that is not a 1D, note, or interval model and that
Chris@876 422 // has a variable sample rate is treated as a set of sparse
Chris@876 423 // time value models, one per output bin, because we lack a
Chris@441 424 // sparse 3D model.
Chris@320 425
Chris@876 426 // Anything that is not a 1D, note, or interval model and that
Chris@876 427 // has a fixed sample rate but an unknown number of values per
Chris@876 428 // result is also treated as a set of sparse time value models.
Chris@876 429
Chris@876 430 // For sets of sparse time value models, we create a single
Chris@876 431 // model first as the "standard" output and then create models
Chris@876 432 // for bins 1+ in the additional model map (mapping the output
Chris@876 433 // descriptor to a list of models indexed by bin-1). But we
Chris@876 434 // don't create the additional models yet, as this case has to
Chris@876 435 // work even if the number of bins is unknown at this point --
Chris@877 436 // we create an additional model (copying its parameters from
Chris@877 437 // the default one) each time a new bin is encountered.
Chris@876 438
Chris@876 439 if (!haveBinCount || binCount > 1) {
Chris@876 440 m_needAdditionalModels[n] = true;
Chris@876 441 }
Chris@876 442
Chris@320 443 SparseTimeValueModel *model;
Chris@320 444 if (haveExtents) {
Chris@320 445 model = new SparseTimeValueModel
Chris@320 446 (modelRate, modelResolution, minValue, maxValue, false);
Chris@320 447 } else {
Chris@320 448 model = new SparseTimeValueModel
Chris@320 449 (modelRate, modelResolution, false);
Chris@320 450 }
Chris@558 451
Chris@558 452 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@849 453 model->setScaleUnits(outputs[m_outputNos[n]].unit.c_str());
Chris@320 454
Chris@849 455 out = model;
Chris@320 456
Chris@558 457 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 458 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 459
Chris@441 460 } else {
Chris@320 461
Chris@441 462 // Anything that is not a 1D, note, or interval model and that
Chris@441 463 // has a fixed sample rate and more than one value per result
Chris@441 464 // must be a dense 3D model.
Chris@320 465
Chris@320 466 EditableDenseThreeDimensionalModel *model =
Chris@320 467 new EditableDenseThreeDimensionalModel
Chris@535 468 (modelRate, modelResolution, binCount,
Chris@535 469 EditableDenseThreeDimensionalModel::BasicMultirateCompression,
Chris@535 470 false);
Chris@320 471
Chris@849 472 if (!m_descriptors[n]->binNames.empty()) {
Chris@320 473 std::vector<QString> names;
Chris@930 474 for (int i = 0; i < (int)m_descriptors[n]->binNames.size(); ++i) {
Chris@849 475 names.push_back(m_descriptors[n]->binNames[i].c_str());
Chris@320 476 }
Chris@320 477 model->setBinNames(names);
Chris@320 478 }
Chris@320 479
Chris@849 480 out = model;
Chris@558 481
Chris@558 482 QString outputSignalTypeURI = description.getOutputSignalTypeURI(outputId);
Chris@849 483 out->setRDFTypeURI(outputSignalTypeURI);
Chris@320 484 }
Chris@333 485
Chris@849 486 if (out) {
Chris@849 487 out->setSourceModel(input);
Chris@849 488 m_outputs.push_back(out);
Chris@849 489 }
Chris@320 490 }
Chris@320 491
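Condensed, the branches in createOutputModels() above map an output descriptor to a model type roughly as follows (a restatement of the logic above, not additional rules):

    // binCount == 0 and no duration                     -> SparseOneDimensionalModel (instants)
    // has duration (v2 API), or v1 API with binCount > 1
    //   and VariableSampleRate                          -> NoteModel / FlexiNoteModel (if unit is
    //                                                      Hz/MIDI-like or binCount > 1),
    //                                                      otherwise RegionModel
    // binCount == 1, or VariableSampleRate, or bin
    //   count unknown                                   -> SparseTimeValueModel (plus additional
    //                                                      per-bin models when binCount != 1)
    // fixed/per-step rate with binCount > 1             -> EditableDenseThreeDimensionalModel
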
Chris@1211 492 void
Chris@1211 493 FeatureExtractionModelTransformer::awaitOutputModels()
Chris@1211 494 {
Chris@1211 495 m_outputMutex.lock();
Chris@1211 496 while (!m_haveOutputs) {
Chris@1211 497 m_outputsCondition.wait(&m_outputMutex);
Chris@1211 498 }
Chris@1211 499 m_outputMutex.unlock();
Chris@1211 500 }
Chris@1211 501
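This is the consumer side of the change described in the commit message: initialise(), now called from run() on the transformer thread, sets m_haveOutputs and wakes the condition, and anything that needs the output models blocks here until they exist. A minimal sketch of the accessor shape the commit message describes (name and exact location assumed for illustration):

    Models
    FeatureExtractionModelTransformer::getOutputModels()   // assumed accessor
    {
        awaitOutputModels();   // returns once initialise() has set m_haveOutputs
        return m_outputs;      // created by createOutputModels() on the transformer thread
    }
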
Chris@331 502 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()
Chris@320 503 {
Chris@690 504 // SVDEBUG << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << endl;
Chris@320 505 delete m_plugin;
Chris@930 506 for (int j = 0; j < (int)m_descriptors.size(); ++j) {
Chris@850 507 delete m_descriptors[j];
Chris@850 508 }
Chris@320 509 }
Chris@320 510
Chris@876 511 FeatureExtractionModelTransformer::Models
Chris@876 512 FeatureExtractionModelTransformer::getAdditionalOutputModels()
Chris@876 513 {
Chris@876 514 Models mm;
Chris@876 515 for (AdditionalModelMap::iterator i = m_additionalModels.begin();
Chris@876 516 i != m_additionalModels.end(); ++i) {
Chris@876 517 for (std::map<int, SparseTimeValueModel *>::iterator j =
Chris@876 518 i->second.begin();
Chris@876 519 j != i->second.end(); ++j) {
Chris@876 520 SparseTimeValueModel *m = j->second;
Chris@876 521 if (m) mm.push_back(m);
Chris@876 522 }
Chris@876 523 }
Chris@876 524 return mm;
Chris@876 525 }
Chris@876 526
Chris@877 527 bool
Chris@877 528 FeatureExtractionModelTransformer::willHaveAdditionalOutputModels()
Chris@877 529 {
Chris@877 530 for (std::map<int, bool>::const_iterator i =
Chris@877 531 m_needAdditionalModels.begin();
Chris@877 532 i != m_needAdditionalModels.end(); ++i) {
Chris@877 533 if (i->second) return true;
Chris@877 534 }
Chris@877 535 return false;
Chris@877 536 }
Chris@877 537
Chris@876 538 SparseTimeValueModel *
Chris@876 539 FeatureExtractionModelTransformer::getAdditionalModel(int n, int binNo)
Chris@876 540 {
Chris@893 541 // std::cerr << "getAdditionalModel(" << n << ", " << binNo << ")" << std::endl;
Chris@876 542
Chris@876 543 if (binNo == 0) {
Chris@876 544 std::cerr << "Internal error: binNo == 0 in getAdditionalModel (should be using primary model)" << std::endl;
Chris@876 545 return 0;
Chris@876 546 }
Chris@876 547
Chris@876 548 if (!m_needAdditionalModels[n]) return 0;
Chris@876 549 if (!isOutput<SparseTimeValueModel>(n)) return 0;
Chris@876 550 if (m_additionalModels[n][binNo]) return m_additionalModels[n][binNo];
Chris@876 551
Chris@876 552 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): creating" << std::endl;
Chris@876 553
Chris@876 554 SparseTimeValueModel *baseModel = getConformingOutput<SparseTimeValueModel>(n);
Chris@876 555 if (!baseModel) return 0;
Chris@876 556
Chris@876 557 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): (from " << baseModel << ")" << std::endl;
Chris@876 558
Chris@876 559 SparseTimeValueModel *additional =
Chris@876 560 new SparseTimeValueModel(baseModel->getSampleRate(),
Chris@876 561 baseModel->getResolution(),
Chris@876 562 baseModel->getValueMinimum(),
Chris@876 563 baseModel->getValueMaximum(),
Chris@876 564 false);
Chris@876 565
Chris@876 566 additional->setScaleUnits(baseModel->getScaleUnits());
Chris@876 567 additional->setRDFTypeURI(baseModel->getRDFTypeURI());
Chris@876 568
Chris@876 569 m_additionalModels[n][binNo] = additional;
Chris@876 570 return additional;
Chris@876 571 }
Chris@876 572
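Taken together with addFeature() below, the effect (illustrated with hypothetical values) is that for a sparse time-value output with, say, three bins, bin 0 goes to the primary model in m_outputs[n] while the remaining bins are routed to lazily created models from this map:

    // feature.values = { 0.5, 0.7, 0.9 } for output n:
    //   0.5 -> m_outputs[n]               (primary SparseTimeValueModel)
    //   0.7 -> getAdditionalModel(n, 1)   (created on first use, parameters
    //   0.9 -> getAdditionalModel(n, 2)    copied from the primary model)
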
Chris@320 573 DenseTimeValueModel *
Chris@350 574 FeatureExtractionModelTransformer::getConformingInput()
Chris@320 575 {
Chris@690 576 // SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: input model is " << getInputModel() << endl;
Chris@408 577
Chris@320 578 DenseTimeValueModel *dtvm =
Chris@320 579 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 580 if (!dtvm) {
Chris@690 581 SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << endl;
Chris@320 582 }
Chris@320 583 return dtvm;
Chris@320 584 }
Chris@320 585
Chris@320 586 void
Chris@331 587 FeatureExtractionModelTransformer::run()
Chris@320 588 {
Chris@1211 589 initialise();
Chris@1211 590
Chris@350 591 DenseTimeValueModel *input = getConformingInput();
Chris@320 592 if (!input) return;
Chris@320 593
Chris@849 594 if (m_outputs.empty()) return;
Chris@320 595
Chris@850 596 Transform primaryTransform = m_transforms[0];
Chris@850 597
Chris@497 598 while (!input->isReady() && !m_abandoned) {
Chris@877 599 cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << endl;
Chris@497 600 usleep(500000);
Chris@320 601 }
Chris@497 602 if (m_abandoned) return;
Chris@320 603
Chris@1040 604 sv_samplerate_t sampleRate = input->getSampleRate();
Chris@320 605
Chris@930 606 int channelCount = input->getChannelCount();
Chris@930 607 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 608 channelCount = 1;
Chris@320 609 }
Chris@320 610
Chris@320 611 float **buffers = new float*[channelCount];
Chris@930 612 for (int ch = 0; ch < channelCount; ++ch) {
Chris@850 613 buffers[ch] = new float[primaryTransform.getBlockSize() + 2];
Chris@320 614 }
Chris@320 615
Chris@930 616 int stepSize = primaryTransform.getStepSize();
Chris@930 617 int blockSize = primaryTransform.getBlockSize();
Chris@350 618
Chris@320 619 bool frequencyDomain = (m_plugin->getInputDomain() ==
Chris@320 620 Vamp::Plugin::FrequencyDomain);
Chris@320 621 std::vector<FFTModel *> fftModels;
Chris@320 622
Chris@320 623 if (frequencyDomain) {
Chris@930 624 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 625 FFTModel *model = new FFTModel
Chris@350 626 (getConformingInput(),
Chris@350 627 channelCount == 1 ? m_input.getChannel() : ch,
Chris@850 628 primaryTransform.getWindowType(),
Chris@350 629 blockSize,
Chris@350 630 stepSize,
Chris@1090 631 blockSize);
Chris@1080 632 if (!model->isOK() || model->getError() != "") {
Chris@1080 633 QString err = model->getError();
Chris@320 634 delete model;
Chris@850 635 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 636 setCompletion(j, 100);
Chris@850 637 }
Chris@387 638 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either
Chris@1080 639 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer: error is: " + err);
Chris@320 640 }
Chris@320 641 fftModels.push_back(model);
Chris@1080 642 cerr << "created model for channel " << ch << endl;
Chris@320 643 }
Chris@320 644 }
Chris@320 645
Chris@1040 646 sv_frame_t startFrame = m_input.getModel()->getStartFrame();
Chris@1040 647 sv_frame_t endFrame = m_input.getModel()->getEndFrame();
Chris@320 648
Chris@850 649 RealTime contextStartRT = primaryTransform.getStartTime();
Chris@850 650 RealTime contextDurationRT = primaryTransform.getDuration();
Chris@350 651
Chris@1040 652 sv_frame_t contextStart =
Chris@350 653 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 654
Chris@1040 655 sv_frame_t contextDuration =
Chris@350 656 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 657
Chris@320 658 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 659 contextStart = startFrame;
Chris@320 660 }
Chris@320 661
Chris@320 662 if (contextDuration == 0) {
Chris@320 663 contextDuration = endFrame - contextStart;
Chris@320 664 }
Chris@320 665 if (contextStart + contextDuration > endFrame) {
Chris@320 666 contextDuration = endFrame - contextStart;
Chris@320 667 }
Chris@320 668
Chris@1039 669 sv_frame_t blockFrame = contextStart;
Chris@320 670
Chris@320 671 long prevCompletion = 0;
Chris@320 672
Chris@850 673 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 674 setCompletion(j, 0);
Chris@850 675 }
Chris@320 676
Chris@556 677 float *reals = 0;
Chris@556 678 float *imaginaries = 0;
Chris@556 679 if (frequencyDomain) {
Chris@556 680 reals = new float[blockSize/2 + 1];
Chris@556 681 imaginaries = new float[blockSize/2 + 1];
Chris@556 682 }
Chris@556 683
Chris@678 684 QString error = "";
Chris@678 685
Chris@320 686 while (!m_abandoned) {
Chris@320 687
Chris@320 688 if (frequencyDomain) {
Chris@350 689 if (blockFrame - int(blockSize)/2 >
Chris@320 690 contextStart + contextDuration) break;
Chris@320 691 } else {
Chris@320 692 if (blockFrame >=
Chris@320 693 contextStart + contextDuration) break;
Chris@320 694 }
Chris@320 695
Chris@690 696 // SVDEBUG << "FeatureExtractionModelTransformer::run: blockFrame "
Chris@320 697 // << blockFrame << ", endFrame " << endFrame << ", blockSize "
Chris@687 698 // << blockSize << endl;
Chris@320 699
Chris@1039 700 int completion = int
Chris@1039 701 ((((blockFrame - contextStart) / stepSize) * 99) /
Chris@1039 702 (contextDuration / stepSize + 1));
Chris@320 703
Chris@350 704 // channelCount is either m_input.getModel()->channelCount or 1
Chris@320 705
Chris@363 706 if (frequencyDomain) {
Chris@930 707 for (int ch = 0; ch < channelCount; ++ch) {
Chris@1039 708 int column = int((blockFrame - startFrame) / stepSize);
Chris@1008 709 if (fftModels[ch]->getValuesAt(column, reals, imaginaries)) {
Chris@1008 710 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 711 buffers[ch][i*2] = reals[i];
Chris@1008 712 buffers[ch][i*2+1] = imaginaries[i];
Chris@1008 713 }
Chris@1008 714 } else {
Chris@1008 715 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 716 buffers[ch][i*2] = 0.f;
Chris@1008 717 buffers[ch][i*2+1] = 0.f;
Chris@1008 718 }
Chris@1008 719 }
Chris@678 720 error = fftModels[ch]->getError();
Chris@678 721 if (error != "") {
Chris@843 722 cerr << "FeatureExtractionModelTransformer::run: Abandoning, error is " << error << endl;
Chris@678 723 m_abandoned = true;
Chris@678 724 m_message = error;
Chris@1080 725 break;
Chris@678 726 }
Chris@363 727 }
Chris@363 728 } else {
Chris@363 729 getFrames(channelCount, blockFrame, blockSize, buffers);
Chris@320 730 }
Chris@320 731
Chris@497 732 if (m_abandoned) break;
Chris@497 733
Chris@1211 734 cerr << "calling process() from thread "
Chris@1211 735 << QThread::currentThreadId() << endl;
Chris@1211 736
Chris@320 737 Vamp::Plugin::FeatureSet features = m_plugin->process
Chris@1040 738 (buffers, RealTime::frame2RealTime(blockFrame, sampleRate).toVampRealTime());
Chris@320 739
Chris@497 740 if (m_abandoned) break;
Chris@497 741
Chris@850 742 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 743 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 744 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 745 addFeature(j, blockFrame, feature);
Chris@850 746 }
Chris@850 747 }
Chris@320 748
Chris@320 749 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@850 750 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 751 setCompletion(j, completion);
Chris@850 752 }
Chris@320 753 prevCompletion = completion;
Chris@320 754 }
Chris@320 755
Chris@350 756 blockFrame += stepSize;
Chris@320 757 }
Chris@320 758
Chris@497 759 if (!m_abandoned) {
Chris@497 760 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures();
Chris@320 761
Chris@850 762 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 763 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 764 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 765 addFeature(j, blockFrame, feature);
Chris@850 766 }
Chris@497 767 }
Chris@497 768 }
Chris@320 769
Chris@850 770 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 771 setCompletion(j, 100);
Chris@850 772 }
Chris@320 773
Chris@320 774 if (frequencyDomain) {
Chris@930 775 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 776 delete fftModels[ch];
Chris@320 777 }
Chris@556 778 delete[] reals;
Chris@556 779 delete[] imaginaries;
Chris@320 780 }
Chris@974 781
Chris@974 782 for (int ch = 0; ch < channelCount; ++ch) {
Chris@974 783 delete[] buffers[ch];
Chris@974 784 }
Chris@974 785 delete[] buffers;
Chris@320 786 }
Chris@320 787
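A note on the buffer layout used in the loop above: for FrequencyDomain plugins each channel's spectrum is packed into the same float buffers as the time-domain case, interleaving real and imaginary parts, which is why each buffer is allocated blockSize + 2 floats.

    // for i = 0 .. blockSize/2:
    //   buffers[ch][2*i]   = real part of bin i
    //   buffers[ch][2*i+1] = imaginary part of bin i
    // (blockSize/2 + 1) complex bins  =>  blockSize + 2 floats per channel
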
Chris@320 788 void
Chris@363 789 FeatureExtractionModelTransformer::getFrames(int channelCount,
Chris@1039 790 sv_frame_t startFrame,
Chris@1039 791 sv_frame_t size,
Chris@363 792 float **buffers)
Chris@320 793 {
Chris@1039 794 sv_frame_t offset = 0;
Chris@320 795
Chris@320 796 if (startFrame < 0) {
Chris@363 797 for (int c = 0; c < channelCount; ++c) {
Chris@1039 798 for (sv_frame_t i = 0; i < size && startFrame + i < 0; ++i) {
Chris@363 799 buffers[c][i] = 0.0f;
Chris@363 800 }
Chris@320 801 }
Chris@320 802 offset = -startFrame;
Chris@320 803 size -= offset;
Chris@320 804 if (size <= 0) return;
Chris@320 805 startFrame = 0;
Chris@320 806 }
Chris@320 807
Chris@350 808 DenseTimeValueModel *input = getConformingInput();
Chris@350 809 if (!input) return;
Chris@363 810
Chris@1039 811 sv_frame_t got = 0;
Chris@350 812
Chris@363 813 if (channelCount == 1) {
Chris@363 814
Chris@1096 815 auto data = input->getData(m_input.getChannel(), startFrame, size);
Chris@1096 816 got = data.size();
Chris@1096 817
Chris@1096 818 copy(data.begin(), data.end(), buffers[0] + offset);
Chris@363 819
Chris@363 820 if (m_input.getChannel() == -1 && input->getChannelCount() > 1) {
Chris@363 821 // use mean instead of sum, as plugin input
Chris@363 822 float cc = float(input->getChannelCount());
Chris@1096 823 for (sv_frame_t i = 0; i < got; ++i) {
Chris@363 824 buffers[0][i + offset] /= cc;
Chris@363 825 }
Chris@363 826 }
Chris@363 827
Chris@363 828 } else {
Chris@363 829
Chris@1096 830 auto data = input->getMultiChannelData(0, channelCount-1, startFrame, size);
Chris@1096 831 if (!data.empty()) {
Chris@1096 832 got = data[0].size();
Chris@1096 833 for (int c = 0; in_range_for(data, c); ++c) {
Chris@1096 834 copy(data[c].begin(), data[c].end(), buffers[c] + offset);
Chris@363 835 }
Chris@363 836 }
Chris@363 837 }
Chris@320 838
Chris@320 839 while (got < size) {
Chris@363 840 for (int c = 0; c < channelCount; ++c) {
Chris@363 841 buffers[c][got + offset] = 0.0;
Chris@363 842 }
Chris@320 843 ++got;
Chris@320 844 }
Chris@320 845 }
Chris@320 846
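When the input channel is -1 against a multi-channel model and the plugin is fed a single channel, getData() returns the channel sum (hence the "use mean instead of sum" comment), so the division above produces a mean mixdown. With hypothetical stereo samples:

    // L = 0.4, R = 0.2, input->getChannelCount() == 2:
    //   getData(-1, ...) yields 0.6;  buffers[0][i] = 0.6 / 2 = 0.3
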
Chris@320 847 void
Chris@850 848 FeatureExtractionModelTransformer::addFeature(int n,
Chris@1039 849 sv_frame_t blockFrame,
Chris@850 850 const Vamp::Plugin::Feature &feature)
Chris@320 851 {
Chris@1040 852 sv_samplerate_t inputRate = m_input.getModel()->getSampleRate();
Chris@320 853
Chris@843 854 // cerr << "FeatureExtractionModelTransformer::addFeature: blockFrame = "
Chris@712 855 // << blockFrame << ", hasTimestamp = " << feature.hasTimestamp
Chris@712 856 // << ", timestamp = " << feature.timestamp << ", hasDuration = "
Chris@712 857 // << feature.hasDuration << ", duration = " << feature.duration
Chris@843 858 // << endl;
Chris@320 859
Chris@1039 860 sv_frame_t frame = blockFrame;
Chris@320 861
Chris@849 862 if (m_descriptors[n]->sampleType ==
Chris@320 863 Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320 864
Chris@320 865 if (!feature.hasTimestamp) {
Chris@843 866 cerr
Chris@331 867 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@320 868 << "Feature has variable sample rate but no timestamp!"
Chris@843 869 << endl;
Chris@320 870 return;
Chris@320 871 } else {
Chris@1040 872 frame = RealTime::realTime2Frame(feature.timestamp, inputRate);
Chris@320 873 }
Chris@320 874
Chris@1071 875 // cerr << "variable sample rate: timestamp = " << feature.timestamp
Chris@1071 876 // << " at input rate " << inputRate << " -> " << frame << endl;
Chris@1071 877
Chris@849 878 } else if (m_descriptors[n]->sampleType ==
Chris@320 879 Vamp::Plugin::OutputDescriptor::FixedSampleRate) {
Chris@320 880
Chris@1071 881 sv_samplerate_t rate = m_descriptors[n]->sampleRate;
Chris@1071 882 if (rate <= 0.0) {
Chris@1071 883 rate = inputRate;
Chris@1071 884 }
Chris@1071 885
Chris@779 886 if (!feature.hasTimestamp) {
Chris@849 887 ++m_fixedRateFeatureNos[n];
Chris@779 888 } else {
Chris@779 889 RealTime ts(feature.timestamp.sec, feature.timestamp.nsec);
Chris@1071 890 m_fixedRateFeatureNos[n] = (int)lrint(ts.toDouble() * rate);
Chris@779 891 }
Chris@862 892
Chris@1071 893 // cerr << "m_fixedRateFeatureNo = " << m_fixedRateFeatureNos[n]
Chris@1071 894 // << ", m_descriptor->sampleRate = " << m_descriptors[n]->sampleRate
Chris@862 895 // << ", inputRate = " << inputRate
Chris@862 896 // << " giving frame = ";
Chris@1071 897 frame = lrint((double(m_fixedRateFeatureNos[n]) / rate) * inputRate);
Chris@1071 898 // cerr << frame << endl;
Chris@320 899 }
Chris@862 900
Chris@862 901 if (frame < 0) {
Chris@862 902 cerr
Chris@862 903 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@862 904 << "Negative frame counts are not supported (frame = " << frame
Chris@862 905 << " from timestamp " << feature.timestamp
Chris@862 906 << "), dropping feature"
Chris@862 907 << endl;
Chris@862 908 return;
Chris@862 909 }
Chris@862 910
Chris@441 911 // Rather than repeat the complicated tests from the constructor
Chris@441 912 // to determine what sort of model we must be adding the features
Chris@441 913 // to, we instead test what sort of model the constructor decided
Chris@441 914 // to create.
Chris@320 915
Chris@849 916 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@441 917
Chris@441 918 SparseOneDimensionalModel *model =
Chris@849 919 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 920 if (!model) return;
Chris@350 921
Chris@441 922 model->addPoint(SparseOneDimensionalModel::Point
Chris@441 923 (frame, feature.label.c_str()));
Chris@320 924
Chris@849 925 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 926
Chris@350 927 SparseTimeValueModel *model =
Chris@849 928 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 929 if (!model) return;
Chris@350 930
Chris@930 931 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 932
Chris@454 933 float value = feature.values[i];
Chris@454 934
Chris@454 935 QString label = feature.label.c_str();
Chris@454 936 if (feature.values.size() > 1) {
Chris@454 937 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 938 }
Chris@454 939
Chris@876 940 SparseTimeValueModel *targetModel = model;
Chris@876 941
Chris@876 942 if (m_needAdditionalModels[n] && i > 0) {
Chris@876 943 targetModel = getAdditionalModel(n, i);
Chris@876 944 if (!targetModel) targetModel = model;
Chris@893 945 // std::cerr << "adding point to model " << targetModel
Chris@893 946 // << " for output " << n << " bin " << i << std::endl;
Chris@876 947 }
Chris@876 948
Chris@876 949 targetModel->addPoint
Chris@876 950 (SparseTimeValueModel::Point(frame, value, label));
Chris@454 951 }
Chris@320 952
Chris@849 953 } else if (isOutput<FlexiNoteModel>(n) || isOutput<NoteModel>(n) || isOutput<RegionModel>(n)) { //GF: Added Note Model
Chris@320 954
Chris@441 955 int index = 0;
Chris@441 956
Chris@441 957 float value = 0.0;
Chris@930 958 if ((int)feature.values.size() > index) {
Chris@441 959 value = feature.values[index++];
Chris@441 960 }
Chris@320 961
Chris@1039 962 sv_frame_t duration = 1;
Chris@441 963 if (feature.hasDuration) {
Chris@1040 964 duration = RealTime::realTime2Frame(feature.duration, inputRate);
Chris@441 965 } else {
Chris@1039 966 if (in_range_for(feature.values, index)) {
Chris@1039 967 duration = lrintf(feature.values[index++]);
Chris@441 968 }
Chris@441 969 }
gyorgyf@786 970
Chris@891 971 if (isOutput<FlexiNoteModel>(n)) { // GF: added for flexi note model
gyorgyf@786 972
gyorgyf@786 973 float velocity = 100;
Chris@930 974 if ((int)feature.values.size() > index) {
gyorgyf@786 975 velocity = feature.values[index++];
gyorgyf@786 976 }
gyorgyf@786 977 if (velocity < 0) velocity = 127;
gyorgyf@786 978 if (velocity > 127) velocity = 127;
gyorgyf@786 979
Chris@849 980 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 981 if (!model) return;
Chris@1039 982 model->addPoint(FlexiNoteModel::Point(frame,
Chris@1039 983 value, // value is pitch
Chris@1039 984 duration,
Chris@1039 985 velocity / 127.f,
Chris@1039 986 feature.label.c_str()));
gyorgyf@786 987 // GF: end -- added for flexi note model
Chris@849 988 } else if (isOutput<NoteModel>(n)) {
Chris@320 989
Chris@441 990 float velocity = 100;
Chris@930 991 if ((int)feature.values.size() > index) {
Chris@441 992 velocity = feature.values[index++];
Chris@441 993 }
Chris@441 994 if (velocity < 0) velocity = 127;
Chris@441 995 if (velocity > 127) velocity = 127;
Chris@320 996
Chris@849 997 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@441 998 if (!model) return;
Chris@441 999 model->addPoint(NoteModel::Point(frame, value, // value is pitch
Chris@1039 1000 duration,
Chris@441 1001 velocity / 127.f,
Chris@441 1002 feature.label.c_str()));
Chris@441 1003 } else {
gyorgyf@786 1004
Chris@849 1005 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@454 1006 if (!model) return;
Chris@454 1007
Chris@474 1008 if (feature.hasDuration && !feature.values.empty()) {
Chris@454 1009
Chris@930 1010 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 1011
Chris@454 1012 float value = feature.values[i];
Chris@454 1013
Chris@454 1014 QString label = feature.label.c_str();
Chris@454 1015 if (feature.values.size() > 1) {
Chris@454 1016 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 1017 }
Chris@454 1018
Chris@1039 1019 model->addPoint(RegionModel::Point(frame,
Chris@1039 1020 value,
Chris@1039 1021 duration,
Chris@454 1022 label));
Chris@454 1023 }
Chris@454 1024 } else {
Chris@454 1025
Chris@1039 1026 model->addPoint(RegionModel::Point(frame,
Chris@1039 1027 value,
Chris@1039 1028 duration,
Chris@441 1029 feature.label.c_str()));
Chris@454 1030 }
Chris@441 1031 }
Chris@320 1032
Chris@849 1033 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1034
Chris@1154 1035 DenseThreeDimensionalModel::Column values = feature.values;
Chris@320 1036
Chris@320 1037 EditableDenseThreeDimensionalModel *model =
Chris@849 1038 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1039 if (!model) return;
Chris@320 1040
Chris@889 1041 // cerr << "(note: model resolution = " << model->getResolution() << ")"
Chris@889 1042 // << endl;
Chris@889 1043
Chris@891 1044 if (!feature.hasTimestamp && m_fixedRateFeatureNos[n] >= 0) {
Chris@891 1045 model->setColumn(m_fixedRateFeatureNos[n], values);
Chris@889 1046 } else {
Chris@1039 1047 model->setColumn(int(frame / model->getResolution()), values);
Chris@889 1048 }
Chris@441 1049
Chris@441 1050 } else {
Chris@690 1051 SVDEBUG << "FeatureExtractionModelTransformer::addFeature: Unknown output model type!" << endl;
Chris@320 1052 }
Chris@320 1053 }
Chris@320 1054
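A worked example (hypothetical rates) of the fixed-sample-rate frame mapping above: with a descriptor sample rate of 100 Hz and an input rate of 44100 Hz, the 51st untimestamped feature lands at frame 22050.

    // m_fixedRateFeatureNos[n] == 50 (incremented before use)
    // frame = lrint((50 / 100.0) * 44100) = 22050
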
Chris@320 1055 void
Chris@850 1056 FeatureExtractionModelTransformer::setCompletion(int n, int completion)
Chris@320 1057 {
Chris@690 1058 // SVDEBUG << "FeatureExtractionModelTransformer::setCompletion("
Chris@687 1059 // << completion << ")" << endl;
Chris@320 1060
Chris@849 1061 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@320 1062
Chris@350 1063 SparseOneDimensionalModel *model =
Chris@849 1064 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 1065 if (!model) return;
Chris@923 1066 if (model->isAbandoning()) abandon();
Chris@441 1067 model->setCompletion(completion, true);
Chris@320 1068
Chris@849 1069 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 1070
Chris@350 1071 SparseTimeValueModel *model =
Chris@849 1072 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 1073 if (!model) return;
Chris@923 1074 if (model->isAbandoning()) abandon();
Chris@441 1075 model->setCompletion(completion, true);
Chris@320 1076
Chris@849 1077 } else if (isOutput<NoteModel>(n)) {
Chris@320 1078
Chris@849 1079 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@320 1080 if (!model) return;
Chris@923 1081 if (model->isAbandoning()) abandon();
Chris@441 1082 model->setCompletion(completion, true);
gyorgyf@786 1083
Chris@923 1084 } else if (isOutput<FlexiNoteModel>(n)) {
gyorgyf@786 1085
Chris@849 1086 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 1087 if (!model) return;
Chris@923 1088 if (model->isAbandoning()) abandon();
gyorgyf@786 1089 model->setCompletion(completion, true);
Chris@320 1090
Chris@849 1091 } else if (isOutput<RegionModel>(n)) {
Chris@441 1092
Chris@849 1093 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@441 1094 if (!model) return;
Chris@923 1095 if (model->isAbandoning()) abandon();
Chris@441 1096 model->setCompletion(completion, true);
Chris@441 1097
Chris@849 1098 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1099
Chris@320 1100 EditableDenseThreeDimensionalModel *model =
Chris@849 1101 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1102 if (!model) return;
Chris@923 1103 if (model->isAbandoning()) abandon();
Chris@350 1104 model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320 1105 }
Chris@320 1106 }
Chris@320 1107