annotate transform/FeatureExtractionModelTransformer.cpp @ 1248:58dd6a6fe414 piper

Update to use listargs variant of Piper stuff (so that the plugin winnowing feature from the penultimate commit actually works)
author Chris Cannam
date Thu, 03 Nov 2016 15:38:17 +0000
parents a83541a1f100
children cbdd534f517a
rev   line source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "FeatureExtractionModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/FeatureExtractionPluginFactory.h"
Chris@1225 19
Chris@320 20 #include "plugin/PluginXml.h"
Chris@475 21 #include <vamp-hostsdk/Plugin.h>
Chris@320 22
Chris@320 23 #include "data/model/Model.h"
Chris@320 24 #include "base/Window.h"
Chris@387 25 #include "base/Exceptions.h"
Chris@320 26 #include "data/model/SparseOneDimensionalModel.h"
Chris@320 27 #include "data/model/SparseTimeValueModel.h"
Chris@320 28 #include "data/model/EditableDenseThreeDimensionalModel.h"
Chris@320 29 #include "data/model/DenseTimeValueModel.h"
Chris@320 30 #include "data/model/NoteModel.h"
gyorgyf@786 31 #include "data/model/FlexiNoteModel.h"
Chris@441 32 #include "data/model/RegionModel.h"
Chris@320 33 #include "data/model/FFTModel.h"
Chris@320 34 #include "data/model/WaveFileModel.h"
Chris@558 35 #include "rdf/PluginRDFDescription.h"
Chris@320 36
Chris@350 37 #include "TransformFactory.h"
Chris@350 38
Chris@320 39 #include <iostream>
Chris@320 40
Chris@859 41 #include <QSettings>
Chris@859 42
Chris@350 43 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 44 const Transform &transform) :
Chris@350 45 ModelTransformer(in, transform),
Chris@1211 46 m_plugin(0),
Chris@1211 47 m_haveOutputs(false)
Chris@320 48 {
Chris@1080 49 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl;
Chris@849 50 }
Chris@849 51
Chris@849 52 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 53 const Transforms &transforms) :
Chris@849 54 ModelTransformer(in, transforms),
Chris@1211 55 m_plugin(0),
Chris@1211 56 m_haveOutputs(false)
Chris@849 57 {
Chris@1080 58 if (m_transforms.empty()) {
Chris@1080 59 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s)" << endl;
Chris@1080 60 } else {
Chris@1080 61 SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s), first has plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl;
Chris@1080 62 }
Chris@849 63 }
Chris@849 64
Chris@849 65 static bool
Chris@849 66 areTransformsSimilar(const Transform &t1, const Transform &t2)
Chris@849 67 {
Chris@849 68 Transform t2o(t2);
Chris@849 69 t2o.setOutput(t1.getOutput());
Chris@849 70 return t1 == t2o;
Chris@849 71 }
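// [Editorial sketch, not part of this revision] Two transforms that are
// identical apart from their chosen plugin output compare as similar:
//
//     Transform a = t, b = t;             // t is some configured transform
//     a.setOutput("onsets");
//     b.setOutput("detectionfunction");
//     areTransformsSimilar(a, b);         // true: only the output differs
//     b.setStepSize(a.getStepSize() * 2);
//     areTransformsSimilar(a, b);         // false: step size differs too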
Chris@849 72
Chris@849 73 bool
Chris@849 74 FeatureExtractionModelTransformer::initialise()
Chris@849 75 {
Chris@1237 76 // This is (now) called from the run thread. The plugin is
Chris@1237 77 // constructed, initialised, used, and destroyed all from a single
Chris@1237 78 // thread.
Chris@1237 79
Chris@849 80 // All transforms must use the same plugin, parameters, and
Chris@849 81 // inputs: they can differ only in choice of plugin output. So we
Chris@849 82 // initialise based purely on the first transform in the list (but
Chris@849 83 // first check that they are actually similar as promised)
Chris@849 84
Chris@849 85 for (int j = 1; j < (int)m_transforms.size(); ++j) {
Chris@849 86 if (!areTransformsSimilar(m_transforms[0], m_transforms[j])) {
Chris@849 87 m_message = tr("Transforms supplied to a single FeatureExtractionModelTransformer instance must be similar in every respect except plugin output");
Chris@849 88 return false;
Chris@849 89 }
Chris@849 90 }
Chris@849 91
Chris@849 92 Transform primaryTransform = m_transforms[0];
Chris@849 93
Chris@849 94 QString pluginId = primaryTransform.getPluginIdentifier();
Chris@320 95
Chris@1226 96 FeatureExtractionPluginFactory *factory =
Chris@1226 97 FeatureExtractionPluginFactory::instance();
Chris@320 98
Chris@320 99 if (!factory) {
Chris@361 100 m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId);
Chris@849 101 return false;
Chris@320 102 }
Chris@320 103
Chris@350 104 DenseTimeValueModel *input = getConformingInput();
Chris@350 105 if (!input) {
Chris@361 106 m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId);
Chris@849 107 return false;
Chris@350 108 }
Chris@320 109
Chris@1211 110 cerr << "instantiating plugin for transform in thread "
Chris@1211 111 << QThread::currentThreadId() << endl;
Chris@1211 112
Chris@1040 113 m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate());
Chris@320 114 if (!m_plugin) {
Chris@361 115 m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId);
Chris@849 116 return false;
Chris@320 117 }
Chris@320 118
Chris@350 119 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 120 (primaryTransform, m_plugin);
Chris@343 121
Chris@350 122 TransformFactory::getInstance()->setPluginParameters
Chris@849 123 (primaryTransform, m_plugin);
Chris@320 124
Chris@930 125 int channelCount = input->getChannelCount();
Chris@930 126 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 127 channelCount = 1;
Chris@320 128 }
Chris@930 129 if ((int)m_plugin->getMinChannelCount() > channelCount) {
Chris@361 130 m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)")
Chris@361 131 .arg(pluginId)
Chris@361 132 .arg(m_plugin->getMinChannelCount())
Chris@361 133 .arg(m_plugin->getMaxChannelCount())
Chris@361 134 .arg(input->getChannelCount());
Chris@849 135 return false;
Chris@320 136 }
Chris@320 137
Chris@690 138 SVDEBUG << "Initialising feature extraction plugin with channels = "
Chris@849 139 << channelCount << ", step = " << primaryTransform.getStepSize()
Chris@849 140 << ", block = " << primaryTransform.getBlockSize() << endl;
Chris@320 141
Chris@320 142 if (!m_plugin->initialise(channelCount,
Chris@849 143 primaryTransform.getStepSize(),
Chris@849 144 primaryTransform.getBlockSize())) {
Chris@361 145
Chris@930 146 int pstep = primaryTransform.getStepSize();
Chris@930 147 int pblock = primaryTransform.getBlockSize();
Chris@361 148
Chris@850 149 ///!!! hang on, this isn't right -- we're modifying a copy
Chris@849 150 primaryTransform.setStepSize(0);
Chris@849 151 primaryTransform.setBlockSize(0);
Chris@361 152 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 153 (primaryTransform, m_plugin);
Chris@361 154
Chris@849 155 if (primaryTransform.getStepSize() != pstep ||
Chris@849 156 primaryTransform.getBlockSize() != pblock) {
Chris@361 157
Chris@361 158 if (!m_plugin->initialise(channelCount,
Chris@849 159 primaryTransform.getStepSize(),
Chris@849 160 primaryTransform.getBlockSize())) {
Chris@361 161
Chris@361 162 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 163 return false;
Chris@361 164
Chris@361 165 } else {
Chris@361 166
Chris@361 167 m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead")
Chris@361 168 .arg(pluginId)
Chris@361 169 .arg(pstep)
Chris@361 170 .arg(pblock)
Chris@849 171 .arg(primaryTransform.getStepSize())
Chris@849 172 .arg(primaryTransform.getBlockSize());
Chris@361 173 }
Chris@361 174
Chris@361 175 } else {
Chris@361 176
Chris@361 177 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 178 return false;
Chris@361 179 }
Chris@320 180 }
Chris@320 181
Chris@849 182 if (primaryTransform.getPluginVersion() != "") {
Chris@366 183 QString pv = QString("%1").arg(m_plugin->getPluginVersion());
Chris@849 184 if (pv != primaryTransform.getPluginVersion()) {
Chris@366 185 QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3")
Chris@849 186 .arg(primaryTransform.getPluginVersion())
Chris@366 187 .arg(pluginId)
Chris@366 188 .arg(pv);
Chris@366 189 if (m_message != "") {
Chris@366 190 m_message = QString("%1; %2").arg(vm).arg(m_message);
Chris@366 191 } else {
Chris@366 192 m_message = vm;
Chris@366 193 }
Chris@366 194 }
Chris@366 195 }
Chris@366 196
Chris@320 197 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@320 198
Chris@320 199 if (outputs.empty()) {
Chris@361 200 m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId);
Chris@849 201 return false;
Chris@320 202 }
Chris@320 203
Chris@849 204 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@849 205
Chris@849 206 for (int i = 0; i < (int)outputs.size(); ++i) {
Chris@849 207 // SVDEBUG << "comparing output " << i << " name \"" << outputs[i].identifier << "\" with expected \"" << m_transforms[j].getOutput() << "\"" << endl;
Chris@849 208 if (m_transforms[j].getOutput() == "" ||
Chris@849 209 outputs[i].identifier == m_transforms[j].getOutput().toStdString()) {
Chris@849 210 m_outputNos.push_back(i);
Chris@849 211 m_descriptors.push_back(new Vamp::Plugin::OutputDescriptor(outputs[i]));
Chris@849 212 m_fixedRateFeatureNos.push_back(-1); // we increment before use
Chris@849 213 break;
Chris@849 214 }
Chris@849 215 }
Chris@849 216
Chris@930 217 if ((int)m_descriptors.size() <= j) {
Chris@849 218 m_message = tr("Plugin \"%1\" has no output named \"%2\"")
Chris@849 219 .arg(pluginId)
Chris@849 220 .arg(m_transforms[j].getOutput());
Chris@849 221 return false;
Chris@849 222 }
Chris@320 223 }
Chris@320 224
Chris@849 225 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@876 226 createOutputModels(j);
Chris@849 227 }
Chris@849 228
Chris@1211 229 m_outputMutex.lock();
Chris@1211 230 m_haveOutputs = true;
Chris@1211 231 m_outputsCondition.wakeAll();
Chris@1211 232 m_outputMutex.unlock();
Chris@1211 233
Chris@849 234 return true;
Chris@558 235 }
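// [Editorial sketch, assuming the constructor API above] A caller wanting
// several outputs from a single plugin run supplies transforms that differ
// only in output, matching the "similar" check performed in initialise():
//
//     Transforms ts;
//     Transform t = /* transform configured for the plugin */;
//     t.setOutput("onsets");            ts.push_back(t);
//     t.setOutput("detectionfunction"); ts.push_back(t);
//     ModelTransformer *mt = new FeatureExtractionModelTransformer(input, ts);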
Chris@558 236
Chris@558 237 void
Chris@1237 238 FeatureExtractionModelTransformer::deinitialise()
Chris@1237 239 {
Chris@1237 240 cerr << "deleting plugin for transform in thread "
Chris@1237 241 << QThread::currentThreadId() << endl;
Chris@1237 242
Chris@1237 243 delete m_plugin;
Chris@1237 244 for (int j = 0; j < (int)m_descriptors.size(); ++j) {
Chris@1237 245 delete m_descriptors[j];
Chris@1237 246 }
Chris@1237 247 }
Chris@1237 248
Chris@1237 249 void
Chris@876 250 FeatureExtractionModelTransformer::createOutputModels(int n)
Chris@558 251 {
Chris@558 252 DenseTimeValueModel *input = getConformingInput();
Chris@558 253
Chris@843 254 // cerr << "FeatureExtractionModelTransformer::createOutputModels: sample type " << m_descriptors[n]->sampleType << ", rate " << m_descriptors[n]->sampleRate << endl;
Chris@712 255
Chris@849 256 PluginRDFDescription description(m_transforms[n].getPluginIdentifier());
Chris@849 257 QString outputId = m_transforms[n].getOutput();
Chris@558 258
Chris@320 259 int binCount = 1;
Chris@320 260 float minValue = 0.0, maxValue = 0.0;
Chris@320 261 bool haveExtents = false;
Chris@876 262 bool haveBinCount = m_descriptors[n]->hasFixedBinCount;
Chris@876 263
Chris@876 264 if (haveBinCount) {
Chris@1039 265 binCount = (int)m_descriptors[n]->binCount;
Chris@320 266 }
Chris@320 267
Chris@876 268 m_needAdditionalModels[n] = false;
Chris@876 269
Chris@843 270 // cerr << "FeatureExtractionModelTransformer: output bin count "
Chris@843 271 // << binCount << endl;
Chris@320 272
Chris@849 273 if (binCount > 0 && m_descriptors[n]->hasKnownExtents) {
Chris@849 274 minValue = m_descriptors[n]->minValue;
Chris@849 275 maxValue = m_descriptors[n]->maxValue;
Chris@320 276 haveExtents = true;
Chris@320 277 }
Chris@320 278
Chris@1040 279 sv_samplerate_t modelRate = input->getSampleRate();
Chris@930 280 int modelResolution = 1;
Chris@712 281
Chris@849 282 if (m_descriptors[n]->sampleType !=
Chris@785 283 Vamp::Plugin::OutputDescriptor::OneSamplePerStep) {
Chris@849 284 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@843 285 cerr << "WARNING: plugin reports output sample rate as "
Chris@849 286 << m_descriptors[n]->sampleRate << " (can't display features with finer resolution than the input rate of " << input->getSampleRate() << ")" << endl;
Chris@785 287 }
Chris@785 288 }
Chris@785 289
Chris@849 290 switch (m_descriptors[n]->sampleType) {
Chris@320 291
Chris@320 292 case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
Chris@849 293 if (m_descriptors[n]->sampleRate != 0.0) {
Chris@1040 294 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@320 295 }
Chris@320 296 break;
Chris@320 297
Chris@320 298 case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
Chris@849 299 modelResolution = m_transforms[n].getStepSize();
Chris@320 300 break;
Chris@320 301
Chris@320 302 case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
Chris@451 303 //!!! SV doesn't actually support display of models that have
Chris@451 304 //!!! different underlying rates together -- so we always set
Chris@451 305 //!!! the model rate to be the input model's rate, and adjust
Chris@451 306 //!!! the resolution appropriately. We can't properly display
Chris@451 307 //!!! data with a higher resolution than the base model at all
Chris@849 308 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@451 309 modelResolution = 1;
Chris@1071 310 } else if (m_descriptors[n]->sampleRate <= 0.0) {
Chris@1071 311 cerr << "WARNING: Fixed sample-rate plugin reports invalid sample rate " << m_descriptors[n]->sampleRate << "; defaulting to input rate of " << input->getSampleRate() << endl;
Chris@1071 312 modelResolution = 1;
Chris@451 313 } else {
Chris@1040 314 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@451 315 }
Chris@320 316 break;
Chris@320 317 }
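// [Editorial worked example] With an input model at 44100 Hz and an output
// declaring a FixedSampleRate of 100 Hz, the calculation above gives
// modelResolution = round(44100 / 100) = 441 input frames per output sample.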
Chris@320 318
Chris@441 319 bool preDurationPlugin = (m_plugin->getVampApiVersion() < 2);
Chris@441 320
Chris@849 321 Model *out = 0;
Chris@849 322
Chris@441 323 if (binCount == 0 &&
Chris@849 324 (preDurationPlugin || !m_descriptors[n]->hasDuration)) {
Chris@320 325
Chris@445 326 // Anything with no value and no duration is an instant
Chris@445 327
Chris@849 328 out = new SparseOneDimensionalModel(modelRate, modelResolution, false);
Chris@558 329 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 330 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 331
Chris@441 332 } else if ((preDurationPlugin && binCount > 1 &&
Chris@849 333 (m_descriptors[n]->sampleType ==
Chris@441 334 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) ||
Chris@849 335 (!preDurationPlugin && m_descriptors[n]->hasDuration)) {
Chris@441 336
Chris@441 337 // For plugins using the old v1 API without explicit duration,
Chris@441 338 // we treat anything that has multiple bins (i.e. that has the
Chris@441 339 // potential to have value and duration) and a variable sample
Chris@441 340 // rate as a note model, taking its values as pitch, duration
Chris@441 341 // and velocity (if present) respectively. This is the same
Chris@441 342 // behaviour as always applied by SV to these plugins in the
Chris@441 343 // past.
Chris@441 344
Chris@441 345 // For plugins with the newer API, we treat anything with
Chris@441 346 // duration as either a note model with pitch and velocity, or
Chris@441 347 // a region model.
Chris@441 348
Chris@441 349 // How do we know whether it's an interval or note model?
Chris@441 350 // What's the essential difference? Is a note model any
Chris@441 351 // interval model using a Hz or "MIDI pitch" scale? There
Chris@441 352 // isn't really a reliable test for "MIDI pitch"... Does a
Chris@441 353 // note model always have velocity? This is a good question
Chris@441 354 // to be addressed by accompanying RDF, but for the moment we
Chris@441 355 // will do the following...
Chris@441 356
Chris@441 357 bool isNoteModel = false;
Chris@441 358
Chris@441 359 // Regions have only value (and duration -- we can't extract a
Chris@441 360 // region model from an old-style plugin that doesn't support
Chris@441 361 // duration)
Chris@441 362 if (binCount > 1) isNoteModel = true;
Chris@441 363
Chris@595 364 // Regions do not have units of Hz or MIDI things (a sweeping
Chris@595 365 // assumption!)
Chris@849 366 if (m_descriptors[n]->unit == "Hz" ||
Chris@849 367 m_descriptors[n]->unit.find("MIDI") != std::string::npos ||
Chris@849 368 m_descriptors[n]->unit.find("midi") != std::string::npos) {
Chris@595 369 isNoteModel = true;
Chris@595 370 }
Chris@441 371
Chris@441 372 // If we had a "sparse 3D model", we would have the additional
Chris@441 373 // problem of determining whether to use that here (if bin
Chris@441 374 // count > 1). But we don't.
Chris@441 375
Chris@859 376 QSettings settings;
Chris@859 377 settings.beginGroup("Transformer");
Chris@859 378 bool flexi = settings.value("use-flexi-note-model", false).toBool();
Chris@859 379 settings.endGroup();
Chris@859 380
Chris@859 381 cerr << "flexi = " << flexi << endl;
Chris@859 382
Chris@859 383 if (isNoteModel && !flexi) {
Chris@441 384
Chris@441 385 NoteModel *model;
Chris@441 386 if (haveExtents) {
Chris@859 387 model = new NoteModel
Chris@859 388 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 389 } else {
Chris@859 390 model = new NoteModel
Chris@859 391 (modelRate, modelResolution, false);
gyorgyf@786 392 }
Chris@849 393 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 394 out = model;
gyorgyf@786 395
Chris@859 396 } else if (isNoteModel && flexi) {
gyorgyf@786 397
gyorgyf@786 398 FlexiNoteModel *model;
gyorgyf@786 399 if (haveExtents) {
Chris@859 400 model = new FlexiNoteModel
Chris@859 401 (modelRate, modelResolution, minValue, maxValue, false);
gyorgyf@786 402 } else {
Chris@859 403 model = new FlexiNoteModel
Chris@859 404 (modelRate, modelResolution, false);
Chris@441 405 }
Chris@849 406 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 407 out = model;
Chris@441 408
Chris@441 409 } else {
Chris@441 410
Chris@441 411 RegionModel *model;
Chris@441 412 if (haveExtents) {
Chris@441 413 model = new RegionModel
Chris@441 414 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 415 } else {
Chris@441 416 model = new RegionModel
Chris@441 417 (modelRate, modelResolution, false);
Chris@441 418 }
Chris@849 419 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 420 out = model;
Chris@441 421 }
Chris@441 422
Chris@558 423 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 424 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 425
Chris@876 426 } else if (binCount == 1 ||
Chris@849 427 (m_descriptors[n]->sampleType ==
Chris@441 428 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) {
Chris@441 429
Chris@441 430 // Anything that is not a 1D, note, or interval model and that
Chris@441 431 // has only one value per result must be a sparse time value
Chris@441 432 // model.
Chris@441 433
Chris@441 434 // Anything that is not a 1D, note, or interval model and that
Chris@876 435 // has a variable sample rate is treated as a set of sparse
Chris@876 436 // time value models, one per output bin, because we lack a
Chris@441 437 // sparse 3D model.
Chris@320 438
Chris@876 439 // Anything that is not a 1D, note, or interval model and that
Chris@876 440 // has a fixed sample rate but an unknown number of values per
Chris@876 441 // result is also treated as a set of sparse time value models.
Chris@876 442
Chris@876 443 // For sets of sparse time value models, we create a single
Chris@876 444 // model first as the "standard" output and then create models
Chris@876 445 // for bins 1+ in the additional model map (mapping the output
Chris@876 446 // descriptor to a list of models indexed by bin-1). But we
Chris@876 447 // don't create the additional models yet, as this case has to
Chris@876 448 // work even if the number of bins is unknown at this point --
Chris@877 449 // we create an additional model (copying its parameters from
Chris@877 450 // the default one) each time a new bin is encountered.
Chris@876 451
Chris@876 452 if (!haveBinCount || binCount > 1) {
Chris@876 453 m_needAdditionalModels[n] = true;
Chris@876 454 }
Chris@876 455
Chris@320 456 SparseTimeValueModel *model;
Chris@320 457 if (haveExtents) {
Chris@320 458 model = new SparseTimeValueModel
Chris@320 459 (modelRate, modelResolution, minValue, maxValue, false);
Chris@320 460 } else {
Chris@320 461 model = new SparseTimeValueModel
Chris@320 462 (modelRate, modelResolution, false);
Chris@320 463 }
Chris@558 464
Chris@558 465 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@849 466 model->setScaleUnits(outputs[m_outputNos[n]].unit.c_str());
Chris@320 467
Chris@849 468 out = model;
Chris@320 469
Chris@558 470 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 471 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 472
Chris@441 473 } else {
Chris@320 474
Chris@441 475 // Anything that is not a 1D, note, or interval model and that
Chris@441 476 // has a fixed sample rate and more than one value per result
Chris@441 477 // must be a dense 3D model.
Chris@320 478
Chris@320 479 EditableDenseThreeDimensionalModel *model =
Chris@320 480 new EditableDenseThreeDimensionalModel
Chris@535 481 (modelRate, modelResolution, binCount,
Chris@535 482 EditableDenseThreeDimensionalModel::BasicMultirateCompression,
Chris@535 483 false);
Chris@320 484
Chris@849 485 if (!m_descriptors[n]->binNames.empty()) {
Chris@320 486 std::vector<QString> names;
Chris@930 487 for (int i = 0; i < (int)m_descriptors[n]->binNames.size(); ++i) {
Chris@849 488 names.push_back(m_descriptors[n]->binNames[i].c_str());
Chris@320 489 }
Chris@320 490 model->setBinNames(names);
Chris@320 491 }
Chris@320 492
Chris@849 493 out = model;
Chris@558 494
Chris@558 495 QString outputSignalTypeURI = description.getOutputSignalTypeURI(outputId);
Chris@849 496 out->setRDFTypeURI(outputSignalTypeURI);
Chris@320 497 }
Chris@333 498
Chris@849 499 if (out) {
Chris@849 500 out->setSourceModel(input);
Chris@849 501 m_outputs.push_back(out);
Chris@849 502 }
Chris@320 503 }
Chris@320 504
Chris@1211 505 void
Chris@1211 506 FeatureExtractionModelTransformer::awaitOutputModels()
Chris@1211 507 {
Chris@1211 508 m_outputMutex.lock();
Chris@1211 509 while (!m_haveOutputs) {
Chris@1211 510 m_outputsCondition.wait(&m_outputMutex);
Chris@1211 511 }
Chris@1211 512 m_outputMutex.unlock();
Chris@1211 513 }
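// [Editorial sketch; caller-side names other than awaitOutputModels are
// assumptions] Since initialise() now runs on the transformer's own thread,
// a caller that needs the models straight away waits on the condition above
// rather than polling:
//
//     transformer->start();              // spawns run(), which calls initialise()
//     transformer->awaitOutputModels();  // blocks until m_haveOutputs is set
//     Models models = transformer->getOutputModels();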
Chris@1211 514
Chris@331 515 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()
Chris@320 516 {
Chris@1237 517 // Parent class dtor set the abandoned flag and waited for the run
Chris@1237 518 // thread to exit; the run thread owns the plugin, and should have
Chris@1237 519 // destroyed it before exiting (via a call to deinitialise)
Chris@320 520 }
Chris@320 521
Chris@876 522 FeatureExtractionModelTransformer::Models
Chris@876 523 FeatureExtractionModelTransformer::getAdditionalOutputModels()
Chris@876 524 {
Chris@876 525 Models mm;
Chris@876 526 for (AdditionalModelMap::iterator i = m_additionalModels.begin();
Chris@876 527 i != m_additionalModels.end(); ++i) {
Chris@876 528 for (std::map<int, SparseTimeValueModel *>::iterator j =
Chris@876 529 i->second.begin();
Chris@876 530 j != i->second.end(); ++j) {
Chris@876 531 SparseTimeValueModel *m = j->second;
Chris@876 532 if (m) mm.push_back(m);
Chris@876 533 }
Chris@876 534 }
Chris@876 535 return mm;
Chris@876 536 }
Chris@876 537
Chris@877 538 bool
Chris@877 539 FeatureExtractionModelTransformer::willHaveAdditionalOutputModels()
Chris@877 540 {
Chris@877 541 for (std::map<int, bool>::const_iterator i =
Chris@877 542 m_needAdditionalModels.begin();
Chris@877 543 i != m_needAdditionalModels.end(); ++i) {
Chris@877 544 if (i->second) return true;
Chris@877 545 }
Chris@877 546 return false;
Chris@877 547 }
Chris@877 548
Chris@876 549 SparseTimeValueModel *
Chris@876 550 FeatureExtractionModelTransformer::getAdditionalModel(int n, int binNo)
Chris@876 551 {
Chris@893 552 // std::cerr << "getAdditionalModel(" << n << ", " << binNo << ")" << std::endl;
Chris@876 553
Chris@876 554 if (binNo == 0) {
Chris@876 555 std::cerr << "Internal error: binNo == 0 in getAdditionalModel (should be using primary model)" << std::endl;
Chris@876 556 return 0;
Chris@876 557 }
Chris@876 558
Chris@876 559 if (!m_needAdditionalModels[n]) return 0;
Chris@876 560 if (!isOutput<SparseTimeValueModel>(n)) return 0;
Chris@876 561 if (m_additionalModels[n][binNo]) return m_additionalModels[n][binNo];
Chris@876 562
Chris@876 563 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): creating" << std::endl;
Chris@876 564
Chris@876 565 SparseTimeValueModel *baseModel = getConformingOutput<SparseTimeValueModel>(n);
Chris@876 566 if (!baseModel) return 0;
Chris@876 567
Chris@876 568 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): (from " << baseModel << ")" << std::endl;
Chris@876 569
Chris@876 570 SparseTimeValueModel *additional =
Chris@876 571 new SparseTimeValueModel(baseModel->getSampleRate(),
Chris@876 572 baseModel->getResolution(),
Chris@876 573 baseModel->getValueMinimum(),
Chris@876 574 baseModel->getValueMaximum(),
Chris@876 575 false);
Chris@876 576
Chris@876 577 additional->setScaleUnits(baseModel->getScaleUnits());
Chris@876 578 additional->setRDFTypeURI(baseModel->getRDFTypeURI());
Chris@876 579
Chris@876 580 m_additionalModels[n][binNo] = additional;
Chris@876 581 return additional;
Chris@876 582 }
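// [Editorial note] For a sparse-time-value output with, say, three bins,
// bin 0 of each feature goes to the primary model in m_outputs[n], while
// bins 1 and 2 are routed (lazily, via this function) to
// m_additionalModels[n][1] and m_additionalModels[n][2] respectively.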
Chris@876 583
Chris@320 584 DenseTimeValueModel *
Chris@350 585 FeatureExtractionModelTransformer::getConformingInput()
Chris@320 586 {
Chris@690 587 // SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: input model is " << getInputModel() << endl;
Chris@408 588
Chris@320 589 DenseTimeValueModel *dtvm =
Chris@320 590 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 591 if (!dtvm) {
Chris@690 592 SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << endl;
Chris@320 593 }
Chris@320 594 return dtvm;
Chris@320 595 }
Chris@320 596
Chris@320 597 void
Chris@331 598 FeatureExtractionModelTransformer::run()
Chris@320 599 {
Chris@1211 600 initialise();
Chris@1211 601
Chris@350 602 DenseTimeValueModel *input = getConformingInput();
Chris@320 603 if (!input) return;
Chris@320 604
Chris@849 605 if (m_outputs.empty()) return;
Chris@320 606
Chris@850 607 Transform primaryTransform = m_transforms[0];
Chris@850 608
Chris@497 609 while (!input->isReady() && !m_abandoned) {
Chris@877 610 cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << endl;
Chris@497 611 usleep(500000);
Chris@320 612 }
Chris@497 613 if (m_abandoned) return;
Chris@320 614
Chris@1040 615 sv_samplerate_t sampleRate = input->getSampleRate();
Chris@320 616
Chris@930 617 int channelCount = input->getChannelCount();
Chris@930 618 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 619 channelCount = 1;
Chris@320 620 }
Chris@320 621
Chris@320 622 float **buffers = new float*[channelCount];
Chris@930 623 for (int ch = 0; ch < channelCount; ++ch) {
Chris@850 624 buffers[ch] = new float[primaryTransform.getBlockSize() + 2];
Chris@320 625 }
Chris@320 626
Chris@930 627 int stepSize = primaryTransform.getStepSize();
Chris@930 628 int blockSize = primaryTransform.getBlockSize();
Chris@350 629
Chris@320 630 bool frequencyDomain = (m_plugin->getInputDomain() ==
Chris@320 631 Vamp::Plugin::FrequencyDomain);
Chris@320 632 std::vector<FFTModel *> fftModels;
Chris@320 633
Chris@320 634 if (frequencyDomain) {
Chris@930 635 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 636 FFTModel *model = new FFTModel
Chris@350 637 (getConformingInput(),
Chris@350 638 channelCount == 1 ? m_input.getChannel() : ch,
Chris@850 639 primaryTransform.getWindowType(),
Chris@350 640 blockSize,
Chris@350 641 stepSize,
Chris@1090 642 blockSize);
Chris@1080 643 if (!model->isOK() || model->getError() != "") {
Chris@1080 644 QString err = model->getError();
Chris@320 645 delete model;
Chris@850 646 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 647 setCompletion(j, 100);
Chris@850 648 }
Chris@387 649 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either
Chris@1080 650 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer: error is: " + err);
Chris@320 651 }
Chris@320 652 fftModels.push_back(model);
Chris@1080 653 cerr << "created model for channel " << ch << endl;
Chris@320 654 }
Chris@320 655 }
Chris@320 656
Chris@1040 657 sv_frame_t startFrame = m_input.getModel()->getStartFrame();
Chris@1040 658 sv_frame_t endFrame = m_input.getModel()->getEndFrame();
Chris@320 659
Chris@850 660 RealTime contextStartRT = primaryTransform.getStartTime();
Chris@850 661 RealTime contextDurationRT = primaryTransform.getDuration();
Chris@350 662
Chris@1040 663 sv_frame_t contextStart =
Chris@350 664 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 665
Chris@1040 666 sv_frame_t contextDuration =
Chris@350 667 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 668
Chris@320 669 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 670 contextStart = startFrame;
Chris@320 671 }
Chris@320 672
Chris@320 673 if (contextDuration == 0) {
Chris@320 674 contextDuration = endFrame - contextStart;
Chris@320 675 }
Chris@320 676 if (contextStart + contextDuration > endFrame) {
Chris@320 677 contextDuration = endFrame - contextStart;
Chris@320 678 }
Chris@320 679
Chris@1039 680 sv_frame_t blockFrame = contextStart;
Chris@320 681
Chris@320 682 long prevCompletion = 0;
Chris@320 683
Chris@850 684 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 685 setCompletion(j, 0);
Chris@850 686 }
Chris@320 687
Chris@556 688 float *reals = 0;
Chris@556 689 float *imaginaries = 0;
Chris@556 690 if (frequencyDomain) {
Chris@556 691 reals = new float[blockSize/2 + 1];
Chris@556 692 imaginaries = new float[blockSize/2 + 1];
Chris@556 693 }
Chris@556 694
Chris@678 695 QString error = "";
Chris@678 696
Chris@320 697 while (!m_abandoned) {
Chris@320 698
Chris@320 699 if (frequencyDomain) {
Chris@350 700 if (blockFrame - int(blockSize)/2 >
Chris@320 701 contextStart + contextDuration) break;
Chris@320 702 } else {
Chris@320 703 if (blockFrame >=
Chris@320 704 contextStart + contextDuration) break;
Chris@320 705 }
Chris@320 706
Chris@690 707 // SVDEBUG << "FeatureExtractionModelTransformer::run: blockFrame "
Chris@320 708 // << blockFrame << ", endFrame " << endFrame << ", blockSize "
Chris@687 709 // << blockSize << endl;
Chris@320 710
Chris@1039 711 int completion = int
Chris@1039 712 ((((blockFrame - contextStart) / stepSize) * 99) /
Chris@1039 713 (contextDuration / stepSize + 1));
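// [Editorial worked example] With a context of 1000 steps, a blockFrame
// 500 steps past contextStart gives (500 * 99) / 1001 = 49, i.e. ~49%;
// the "+ 1" in the divisor keeps the value below 100 until the final
// setCompletion(j, 100) calls after getRemainingFeatures().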
Chris@320 714
Chris@350 715 // channelCount is either m_input.getModel()->getChannelCount() or 1
Chris@320 716
Chris@363 717 if (frequencyDomain) {
Chris@930 718 for (int ch = 0; ch < channelCount; ++ch) {
Chris@1039 719 int column = int((blockFrame - startFrame) / stepSize);
Chris@1008 720 if (fftModels[ch]->getValuesAt(column, reals, imaginaries)) {
Chris@1008 721 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 722 buffers[ch][i*2] = reals[i];
Chris@1008 723 buffers[ch][i*2+1] = imaginaries[i];
Chris@1008 724 }
Chris@1008 725 } else {
Chris@1008 726 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 727 buffers[ch][i*2] = 0.f;
Chris@1008 728 buffers[ch][i*2+1] = 0.f;
Chris@1008 729 }
Chris@1008 730 }
Chris@678 731 error = fftModels[ch]->getError();
Chris@678 732 if (error != "") {
Chris@843 733 cerr << "FeatureExtractionModelTransformer::run: Abandoning, error is " << error << endl;
Chris@678 734 m_abandoned = true;
Chris@678 735 m_message = error;
Chris@1080 736 break;
Chris@678 737 }
Chris@363 738 }
Chris@363 739 } else {
Chris@363 740 getFrames(channelCount, blockFrame, blockSize, buffers);
Chris@320 741 }
Chris@320 742
Chris@497 743 if (m_abandoned) break;
Chris@497 744
Chris@320 745 Vamp::Plugin::FeatureSet features = m_plugin->process
Chris@1040 746 (buffers, RealTime::frame2RealTime(blockFrame, sampleRate).toVampRealTime());
Chris@320 747
Chris@497 748 if (m_abandoned) break;
Chris@497 749
Chris@850 750 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 751 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 752 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 753 addFeature(j, blockFrame, feature);
Chris@850 754 }
Chris@850 755 }
Chris@320 756
Chris@320 757 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@850 758 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 759 setCompletion(j, completion);
Chris@850 760 }
Chris@320 761 prevCompletion = completion;
Chris@320 762 }
Chris@320 763
Chris@350 764 blockFrame += stepSize;
Chris@320 765 }
Chris@320 766
Chris@497 767 if (!m_abandoned) {
Chris@497 768 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures();
Chris@320 769
Chris@850 770 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 771 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 772 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 773 addFeature(j, blockFrame, feature);
Chris@850 774 }
Chris@497 775 }
Chris@497 776 }
Chris@320 777
Chris@850 778 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 779 setCompletion(j, 100);
Chris@850 780 }
Chris@320 781
Chris@320 782 if (frequencyDomain) {
Chris@930 783 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 784 delete fftModels[ch];
Chris@320 785 }
Chris@556 786 delete[] reals;
Chris@556 787 delete[] imaginaries;
Chris@320 788 }
Chris@974 789
Chris@974 790 for (int ch = 0; ch < channelCount; ++ch) {
Chris@974 791 delete[] buffers[ch];
Chris@974 792 }
Chris@974 793 delete[] buffers;
Chris@1237 794
Chris@1237 795 deinitialise();
Chris@320 796 }
Chris@320 797
Chris@320 798 void
Chris@363 799 FeatureExtractionModelTransformer::getFrames(int channelCount,
Chris@1039 800 sv_frame_t startFrame,
Chris@1039 801 sv_frame_t size,
Chris@363 802 float **buffers)
Chris@320 803 {
Chris@1039 804 sv_frame_t offset = 0;
Chris@320 805
Chris@320 806 if (startFrame < 0) {
Chris@363 807 for (int c = 0; c < channelCount; ++c) {
Chris@1039 808 for (sv_frame_t i = 0; i < size && startFrame + i < 0; ++i) {
Chris@363 809 buffers[c][i] = 0.0f;
Chris@363 810 }
Chris@320 811 }
Chris@320 812 offset = -startFrame;
Chris@320 813 size -= offset;
Chris@320 814 if (size <= 0) return;
Chris@320 815 startFrame = 0;
Chris@320 816 }
Chris@320 817
Chris@350 818 DenseTimeValueModel *input = getConformingInput();
Chris@350 819 if (!input) return;
Chris@363 820
Chris@1039 821 sv_frame_t got = 0;
Chris@350 822
Chris@363 823 if (channelCount == 1) {
Chris@363 824
Chris@1096 825 auto data = input->getData(m_input.getChannel(), startFrame, size);
Chris@1096 826 got = data.size();
Chris@1096 827
Chris@1096 828 copy(data.begin(), data.end(), buffers[0] + offset);
Chris@363 829
Chris@363 830 if (m_input.getChannel() == -1 && input->getChannelCount() > 1) {
Chris@363 831 // use mean instead of sum, as plugin input
Chris@363 832 float cc = float(input->getChannelCount());
Chris@1096 833 for (sv_frame_t i = 0; i < got; ++i) {
Chris@363 834 buffers[0][i + offset] /= cc;
Chris@363 835 }
Chris@363 836 }
Chris@363 837
Chris@363 838 } else {
Chris@363 839
Chris@1096 840 auto data = input->getMultiChannelData(0, channelCount-1, startFrame, size);
Chris@1096 841 if (!data.empty()) {
Chris@1096 842 got = data[0].size();
Chris@1096 843 for (int c = 0; in_range_for(data, c); ++c) {
Chris@1096 844 copy(data[c].begin(), data[c].end(), buffers[c] + offset);
Chris@363 845 }
Chris@363 846 }
Chris@363 847 }
Chris@320 848
Chris@320 849 while (got < size) {
Chris@363 850 for (int c = 0; c < channelCount; ++c) {
Chris@363 851 buffers[c][got + offset] = 0.0;
Chris@363 852 }
Chris@320 853 ++got;
Chris@320 854 }
Chris@320 855 }
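// [Editorial worked example] A request with startFrame = -256 and size = 1024
// zero-fills the first 256 samples of every channel buffer, then reads the
// remaining 768 samples from frame 0 of the input model at offset 256.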
Chris@320 856
Chris@320 857 void
Chris@850 858 FeatureExtractionModelTransformer::addFeature(int n,
Chris@1039 859 sv_frame_t blockFrame,
Chris@850 860 const Vamp::Plugin::Feature &feature)
Chris@320 861 {
Chris@1040 862 sv_samplerate_t inputRate = m_input.getModel()->getSampleRate();
Chris@320 863
Chris@843 864 // cerr << "FeatureExtractionModelTransformer::addFeature: blockFrame = "
Chris@712 865 // << blockFrame << ", hasTimestamp = " << feature.hasTimestamp
Chris@712 866 // << ", timestamp = " << feature.timestamp << ", hasDuration = "
Chris@712 867 // << feature.hasDuration << ", duration = " << feature.duration
Chris@843 868 // << endl;
Chris@320 869
Chris@1039 870 sv_frame_t frame = blockFrame;
Chris@320 871
Chris@849 872 if (m_descriptors[n]->sampleType ==
Chris@320 873 Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320 874
Chris@320 875 if (!feature.hasTimestamp) {
Chris@843 876 cerr
Chris@331 877 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@320 878 << "Feature has variable sample rate but no timestamp!"
Chris@843 879 << endl;
Chris@320 880 return;
Chris@320 881 } else {
Chris@1040 882 frame = RealTime::realTime2Frame(feature.timestamp, inputRate);
Chris@320 883 }
Chris@320 884
Chris@1071 885 // cerr << "variable sample rate: timestamp = " << feature.timestamp
Chris@1071 886 // << " at input rate " << inputRate << " -> " << frame << endl;
Chris@1071 887
Chris@849 888 } else if (m_descriptors[n]->sampleType ==
Chris@320 889 Vamp::Plugin::OutputDescriptor::FixedSampleRate) {
Chris@320 890
Chris@1071 891 sv_samplerate_t rate = m_descriptors[n]->sampleRate;
Chris@1071 892 if (rate <= 0.0) {
Chris@1071 893 rate = inputRate;
Chris@1071 894 }
Chris@1071 895
Chris@779 896 if (!feature.hasTimestamp) {
Chris@849 897 ++m_fixedRateFeatureNos[n];
Chris@779 898 } else {
Chris@779 899 RealTime ts(feature.timestamp.sec, feature.timestamp.nsec);
Chris@1071 900 m_fixedRateFeatureNos[n] = (int)lrint(ts.toDouble() * rate);
Chris@779 901 }
Chris@862 902
Chris@1071 903 // cerr << "m_fixedRateFeatureNos[n] = " << m_fixedRateFeatureNos[n]
Chris@1071 904 // << ", m_descriptors[n]->sampleRate = " << m_descriptors[n]->sampleRate
Chris@862 905 // << ", inputRate = " << inputRate
Chris@862 906 // << " giving frame = ";
Chris@1071 907 frame = lrint((double(m_fixedRateFeatureNos[n]) / rate) * inputRate);
Chris@1071 908 // cerr << frame << endl;
Chris@320 909 }
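// [Editorial worked example] For a FixedSampleRate output at 100 Hz with the
// input at 44100 Hz, the 50th fixed-rate feature maps to
// frame = lrint((50 / 100.0) * 44100) = 22050, i.e. 0.5 seconds in.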
Chris@862 910
Chris@862 911 if (frame < 0) {
Chris@862 912 cerr
Chris@862 913 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@862 914 << "Negative frame counts are not supported (frame = " << frame
Chris@862 915 << " from timestamp " << feature.timestamp
Chris@862 916 << "), dropping feature"
Chris@862 917 << endl;
Chris@862 918 return;
Chris@862 919 }
Chris@862 920
Chris@441 921 // Rather than repeat the complicated tests from the constructor
Chris@441 922 // to determine what sort of model we must be adding the features
Chris@441 923 // to, we instead test what sort of model the constructor decided
Chris@441 924 // to create.
Chris@320 925
Chris@849 926 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@441 927
Chris@441 928 SparseOneDimensionalModel *model =
Chris@849 929 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 930 if (!model) return;
Chris@350 931
Chris@441 932 model->addPoint(SparseOneDimensionalModel::Point
Chris@441 933 (frame, feature.label.c_str()));
Chris@320 934
Chris@849 935 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 936
Chris@350 937 SparseTimeValueModel *model =
Chris@849 938 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 939 if (!model) return;
Chris@350 940
Chris@930 941 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 942
Chris@454 943 float value = feature.values[i];
Chris@454 944
Chris@454 945 QString label = feature.label.c_str();
Chris@454 946 if (feature.values.size() > 1) {
Chris@454 947 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 948 }
Chris@454 949
Chris@876 950 SparseTimeValueModel *targetModel = model;
Chris@876 951
Chris@876 952 if (m_needAdditionalModels[n] && i > 0) {
Chris@876 953 targetModel = getAdditionalModel(n, i);
Chris@876 954 if (!targetModel) targetModel = model;
Chris@893 955 // std::cerr << "adding point to model " << targetModel
Chris@893 956 // << " for output " << n << " bin " << i << std::endl;
Chris@876 957 }
Chris@876 958
Chris@876 959 targetModel->addPoint
Chris@876 960 (SparseTimeValueModel::Point(frame, value, label));
Chris@454 961 }
Chris@320 962
Chris@849 963 } else if (isOutput<FlexiNoteModel>(n) || isOutput<NoteModel>(n) || isOutput<RegionModel>(n)) { //GF: Added Note Model
Chris@320 964
Chris@441 965 int index = 0;
Chris@441 966
Chris@441 967 float value = 0.0;
Chris@930 968 if ((int)feature.values.size() > index) {
Chris@441 969 value = feature.values[index++];
Chris@441 970 }
Chris@320 971
Chris@1039 972 sv_frame_t duration = 1;
Chris@441 973 if (feature.hasDuration) {
Chris@1040 974 duration = RealTime::realTime2Frame(feature.duration, inputRate);
Chris@441 975 } else {
Chris@1039 976 if (in_range_for(feature.values, index)) {
Chris@1039 977 duration = lrintf(feature.values[index++]);
Chris@441 978 }
Chris@441 979 }
gyorgyf@786 980
Chris@891 981 if (isOutput<FlexiNoteModel>(n)) { // GF: added for flexi note model
gyorgyf@786 982
gyorgyf@786 983 float velocity = 100;
Chris@930 984 if ((int)feature.values.size() > index) {
gyorgyf@786 985 velocity = feature.values[index++];
gyorgyf@786 986 }
gyorgyf@786 987 if (velocity < 0) velocity = 127;
gyorgyf@786 988 if (velocity > 127) velocity = 127;
gyorgyf@786 989
Chris@849 990 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 991 if (!model) return;
Chris@1039 992 model->addPoint(FlexiNoteModel::Point(frame,
Chris@1039 993 value, // value is pitch
Chris@1039 994 duration,
Chris@1039 995 velocity / 127.f,
Chris@1039 996 feature.label.c_str()));
gyorgyf@786 997 // GF: end -- added for flexi note model
Chris@849 998 } else if (isOutput<NoteModel>(n)) {
Chris@320 999
Chris@441 1000 float velocity = 100;
Chris@930 1001 if ((int)feature.values.size() > index) {
Chris@441 1002 velocity = feature.values[index++];
Chris@441 1003 }
Chris@441 1004 if (velocity < 0) velocity = 127;
Chris@441 1005 if (velocity > 127) velocity = 127;
Chris@320 1006
Chris@849 1007 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@441 1008 if (!model) return;
Chris@441 1009 model->addPoint(NoteModel::Point(frame, value, // value is pitch
Chris@1039 1010 duration,
Chris@441 1011 velocity / 127.f,
Chris@441 1012 feature.label.c_str()));
Chris@441 1013 } else {
gyorgyf@786 1014
Chris@849 1015 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@454 1016 if (!model) return;
Chris@454 1017
Chris@474 1018 if (feature.hasDuration && !feature.values.empty()) {
Chris@454 1019
Chris@930 1020 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 1021
Chris@454 1022 float value = feature.values[i];
Chris@454 1023
Chris@454 1024 QString label = feature.label.c_str();
Chris@454 1025 if (feature.values.size() > 1) {
Chris@454 1026 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 1027 }
Chris@454 1028
Chris@1039 1029 model->addPoint(RegionModel::Point(frame,
Chris@1039 1030 value,
Chris@1039 1031 duration,
Chris@454 1032 label));
Chris@454 1033 }
Chris@454 1034 } else {
Chris@454 1035
Chris@1039 1036 model->addPoint(RegionModel::Point(frame,
Chris@1039 1037 value,
Chris@1039 1038 duration,
Chris@441 1039 feature.label.c_str()));
Chris@454 1040 }
Chris@441 1041 }
Chris@320 1042
Chris@849 1043 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1044
Chris@1154 1045 DenseThreeDimensionalModel::Column values = feature.values;
Chris@320 1046
Chris@320 1047 EditableDenseThreeDimensionalModel *model =
Chris@849 1048 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1049 if (!model) return;
Chris@320 1050
Chris@889 1051 // cerr << "(note: model resolution = " << model->getResolution() << ")"
Chris@889 1052 // << endl;
Chris@889 1053
Chris@891 1054 if (!feature.hasTimestamp && m_fixedRateFeatureNos[n] >= 0) {
Chris@891 1055 model->setColumn(m_fixedRateFeatureNos[n], values);
Chris@889 1056 } else {
Chris@1039 1057 model->setColumn(int(frame / model->getResolution()), values);
Chris@889 1058 }
Chris@441 1059
Chris@441 1060 } else {
Chris@690 1061 SVDEBUG << "FeatureExtractionModelTransformer::addFeature: Unknown output model type!" << endl;
Chris@320 1062 }
Chris@320 1063 }
Chris@320 1064
Chris@320 1065 void
Chris@850 1066 FeatureExtractionModelTransformer::setCompletion(int n, int completion)
Chris@320 1067 {
Chris@690 1068 // SVDEBUG << "FeatureExtractionModelTransformer::setCompletion("
Chris@687 1069 // << completion << ")" << endl;
Chris@320 1070
Chris@849 1071 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@320 1072
Chris@350 1073 SparseOneDimensionalModel *model =
Chris@849 1074 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 1075 if (!model) return;
Chris@923 1076 if (model->isAbandoning()) abandon();
Chris@441 1077 model->setCompletion(completion, true);
Chris@320 1078
Chris@849 1079 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 1080
Chris@350 1081 SparseTimeValueModel *model =
Chris@849 1082 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 1083 if (!model) return;
Chris@923 1084 if (model->isAbandoning()) abandon();
Chris@441 1085 model->setCompletion(completion, true);
Chris@320 1086
Chris@849 1087 } else if (isOutput<NoteModel>(n)) {
Chris@320 1088
Chris@849 1089 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@320 1090 if (!model) return;
Chris@923 1091 if (model->isAbandoning()) abandon();
Chris@441 1092 model->setCompletion(completion, true);
gyorgyf@786 1093
Chris@923 1094 } else if (isOutput<FlexiNoteModel>(n)) {
gyorgyf@786 1095
Chris@849 1096 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 1097 if (!model) return;
Chris@923 1098 if (model->isAbandoning()) abandon();
gyorgyf@786 1099 model->setCompletion(completion, true);
Chris@320 1100
Chris@849 1101 } else if (isOutput<RegionModel>(n)) {
Chris@441 1102
Chris@849 1103 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@441 1104 if (!model) return;
Chris@923 1105 if (model->isAbandoning()) abandon();
Chris@441 1106 model->setCompletion(completion, true);
Chris@441 1107
Chris@849 1108 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1109
Chris@320 1110 EditableDenseThreeDimensionalModel *model =
Chris@849 1111 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1112 if (!model) return;
Chris@923 1113 if (model->isAbandoning()) abandon();
Chris@350 1114 model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320 1115 }
Chris@320 1116 }
Chris@320 1117