annotate transform/FeatureExtractionModelTransformer.cpp @ 1078:ce82bcdc95d0

Fail upfront if the file is going to be too large. We expect the caller to split up large data sets into several MatrixFiles
author Chris Cannam
date Wed, 10 Jun 2015 13:10:26 +0100
parents f4ad0bfceeb7
children f35c1f9bfaa2
rev   line source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "FeatureExtractionModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/FeatureExtractionPluginFactory.h"
Chris@320 19 #include "plugin/PluginXml.h"
Chris@475 20 #include <vamp-hostsdk/Plugin.h>
Chris@320 21
Chris@320 22 #include "data/model/Model.h"
Chris@320 23 #include "base/Window.h"
Chris@387 24 #include "base/Exceptions.h"
Chris@320 25 #include "data/model/SparseOneDimensionalModel.h"
Chris@320 26 #include "data/model/SparseTimeValueModel.h"
Chris@320 27 #include "data/model/EditableDenseThreeDimensionalModel.h"
Chris@320 28 #include "data/model/DenseTimeValueModel.h"
Chris@320 29 #include "data/model/NoteModel.h"
gyorgyf@786 30 #include "data/model/FlexiNoteModel.h"
Chris@441 31 #include "data/model/RegionModel.h"
Chris@320 32 #include "data/model/FFTModel.h"
Chris@320 33 #include "data/model/WaveFileModel.h"
Chris@558 34 #include "rdf/PluginRDFDescription.h"
Chris@320 35
Chris@350 36 #include "TransformFactory.h"
Chris@350 37
Chris@320 38 #include <iostream>
Chris@320 39
Chris@859 40 #include <QSettings>
Chris@859 41
Chris@350 42 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 43 const Transform &transform) :
Chris@350 44 ModelTransformer(in, transform),
Chris@859 45 m_plugin(0)
Chris@320 46 {
Chris@690 47 // SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId << ", outputName " << m_transform.getOutput() << endl;
Chris@350 48
Chris@849 49 initialise();
Chris@849 50 }
Chris@849 51
Chris@849 52 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
Chris@859 53 const Transforms &transforms) :
Chris@849 54 ModelTransformer(in, transforms),
Chris@859 55 m_plugin(0)
Chris@849 56 {
Chris@849 57 // SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId << ", outputName " << m_transform.getOutput() << endl;
Chris@849 58
Chris@849 59 initialise();
Chris@849 60 }
Chris@849 61
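// Two transforms are considered "similar" if they are identical in
// every respect except for their choice of plugin output; such
// transforms can be evaluated together in a single pass using a
// single shared plugin instance.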
Chris@849 62 static bool
Chris@849 63 areTransformsSimilar(const Transform &t1, const Transform &t2)
Chris@849 64 {
Chris@849 65 Transform t2o(t2);
Chris@849 66 t2o.setOutput(t1.getOutput());
Chris@849 67 return t1 == t2o;
Chris@849 68 }
Chris@849 69
Chris@849 70 bool
Chris@849 71 FeatureExtractionModelTransformer::initialise()
Chris@849 72 {
Chris@849 73 // All transforms must use the same plugin, parameters, and
Chris@849 74 // inputs: they can differ only in choice of plugin output. So we
Chris@849 75 // initialise based purely on the first transform in the list (but
Chris@849 76 // first check that they are actually similar as promised)
Chris@849 77
Chris@849 78 for (int j = 1; j < (int)m_transforms.size(); ++j) {
Chris@849 79 if (!areTransformsSimilar(m_transforms[0], m_transforms[j])) {
Chris@849 80 m_message = tr("Transforms supplied to a single FeatureExtractionModelTransformer instance must be similar in every respect except plugin output");
Chris@849 81 return false;
Chris@849 82 }
Chris@849 83 }
Chris@849 84
Chris@849 85 Transform primaryTransform = m_transforms[0];
Chris@849 86
Chris@849 87 QString pluginId = primaryTransform.getPluginIdentifier();
Chris@320 88
Chris@320 89 FeatureExtractionPluginFactory *factory =
Chris@320 90 FeatureExtractionPluginFactory::instanceFor(pluginId);
Chris@320 91
Chris@320 92 if (!factory) {
Chris@361 93 m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId);
Chris@849 94 return false;
Chris@320 95 }
Chris@320 96
Chris@350 97 DenseTimeValueModel *input = getConformingInput();
Chris@350 98 if (!input) {
Chris@361 99 m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId);
Chris@849 100 return false;
Chris@350 101 }
Chris@320 102
Chris@1040 103 m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate());
Chris@320 104 if (!m_plugin) {
Chris@361 105 m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId);
Chris@849 106 return false;
Chris@320 107 }
Chris@320 108
Chris@350 109 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 110 (primaryTransform, m_plugin);
Chris@343 111
Chris@350 112 TransformFactory::getInstance()->setPluginParameters
Chris@849 113 (primaryTransform, m_plugin);
Chris@320 114
Chris@930 115 int channelCount = input->getChannelCount();
Chris@930 116 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 117 channelCount = 1;
Chris@320 118 }
Chris@930 119 if ((int)m_plugin->getMinChannelCount() > channelCount) {
Chris@361 120 m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)")
Chris@361 121 .arg(pluginId)
Chris@361 122 .arg(m_plugin->getMinChannelCount())
Chris@361 123 .arg(m_plugin->getMaxChannelCount())
Chris@361 124 .arg(input->getChannelCount());
Chris@849 125 return false;
Chris@320 126 }
Chris@320 127
Chris@690 128 SVDEBUG << "Initialising feature extraction plugin with channels = "
Chris@849 129 << channelCount << ", step = " << primaryTransform.getStepSize()
Chris@849 130 << ", block = " << primaryTransform.getBlockSize() << endl;
Chris@320 131
Chris@320 132 if (!m_plugin->initialise(channelCount,
Chris@849 133 primaryTransform.getStepSize(),
Chris@849 134 primaryTransform.getBlockSize())) {
Chris@361 135
Chris@930 136 int pstep = primaryTransform.getStepSize();
Chris@930 137 int pblock = primaryTransform.getBlockSize();
Chris@361 138
Chris@850 139 ///!!! NB this only modifies our local copy of the transform -- the adjusted step and block sizes are never written back to m_transforms, so code that re-reads m_transforms (e.g. run()) may still see the original values
Chris@849 140 primaryTransform.setStepSize(0);
Chris@849 141 primaryTransform.setBlockSize(0);
Chris@361 142 TransformFactory::getInstance()->makeContextConsistentWithPlugin
Chris@849 143 (primaryTransform, m_plugin);
Chris@361 144
Chris@849 145 if (primaryTransform.getStepSize() != pstep ||
Chris@849 146 primaryTransform.getBlockSize() != pblock) {
Chris@361 147
Chris@361 148 if (!m_plugin->initialise(channelCount,
Chris@849 149 primaryTransform.getStepSize(),
Chris@849 150 primaryTransform.getBlockSize())) {
Chris@361 151
Chris@361 152 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 153 return false;
Chris@361 154
Chris@361 155 } else {
Chris@361 156
Chris@361 157 m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead")
Chris@361 158 .arg(pluginId)
Chris@361 159 .arg(pstep)
Chris@361 160 .arg(pblock)
Chris@849 161 .arg(primaryTransform.getStepSize())
Chris@849 162 .arg(primaryTransform.getBlockSize());
Chris@361 163 }
Chris@361 164
Chris@361 165 } else {
Chris@361 166
Chris@361 167 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
Chris@849 168 return false;
Chris@361 169 }
Chris@320 170 }
Chris@320 171
Chris@849 172 if (primaryTransform.getPluginVersion() != "") {
Chris@366 173 QString pv = QString("%1").arg(m_plugin->getPluginVersion());
Chris@849 174 if (pv != primaryTransform.getPluginVersion()) {
Chris@366 175 QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3")
Chris@849 176 .arg(primaryTransform.getPluginVersion())
Chris@366 177 .arg(pluginId)
Chris@366 178 .arg(pv);
Chris@366 179 if (m_message != "") {
Chris@366 180 m_message = QString("%1; %2").arg(vm).arg(m_message);
Chris@366 181 } else {
Chris@366 182 m_message = vm;
Chris@366 183 }
Chris@366 184 }
Chris@366 185 }
Chris@366 186
Chris@320 187 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@320 188
Chris@320 189 if (outputs.empty()) {
Chris@361 190 m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId);
Chris@849 191 return false;
Chris@320 192 }
Chris@320 193
Chris@849 194 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@849 195
Chris@849 196 for (int i = 0; i < (int)outputs.size(); ++i) {
Chris@849 197 // SVDEBUG << "comparing output " << i << " name \"" << outputs[i].identifier << "\" with expected \"" << m_transform.getOutput() << "\"" << endl;
Chris@849 198 if (m_transforms[j].getOutput() == "" ||
Chris@849 199 outputs[i].identifier == m_transforms[j].getOutput().toStdString()) {
Chris@849 200 m_outputNos.push_back(i);
Chris@849 201 m_descriptors.push_back(new Vamp::Plugin::OutputDescriptor(outputs[i]));
Chris@849 202 m_fixedRateFeatureNos.push_back(-1); // we increment before use
Chris@849 203 break;
Chris@849 204 }
Chris@849 205 }
Chris@849 206
Chris@930 207 if ((int)m_descriptors.size() <= j) {
Chris@849 208 m_message = tr("Plugin \"%1\" has no output named \"%2\"")
Chris@849 209 .arg(pluginId)
Chris@849 210 .arg(m_transforms[j].getOutput());
Chris@849 211 return false;
Chris@849 212 }
Chris@320 213 }
Chris@320 214
Chris@849 215 for (int j = 0; j < (int)m_transforms.size(); ++j) {
Chris@876 216 createOutputModels(j);
Chris@849 217 }
Chris@849 218
Chris@849 219 return true;
Chris@558 220 }
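// On success, m_outputNos, m_descriptors, m_fixedRateFeatureNos and
// m_outputs are parallel containers with one entry per requested
// transform, all referring to outputs of the single plugin instance
// held in m_plugin.
//
// A hypothetical caller wanting two outputs of one plugin computed in
// a single pass might proceed along these lines (a sketch only -- the
// transform id and output names are illustrative, not real plugin
// identifiers):
//
//     Transforms ts;
//     Transform t = TransformFactory::getInstance()->
//         getDefaultTransformFor("vamp:example-plugin:example:output-a");
//     ts.push_back(t);
//     t.setOutput("output-b");      // differs only in choice of output
//     ts.push_back(t);
//     FeatureExtractionModelTransformer transformer(input, ts);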
Chris@558 221
Chris@558 222 void
Chris@876 223 FeatureExtractionModelTransformer::createOutputModels(int n)
Chris@558 224 {
Chris@558 225 DenseTimeValueModel *input = getConformingInput();
Chris@558 226
Chris@843 227 // cerr << "FeatureExtractionModelTransformer::createOutputModel: sample type " << m_descriptor->sampleType << ", rate " << m_descriptor->sampleRate << endl;
Chris@712 228
Chris@849 229 PluginRDFDescription description(m_transforms[n].getPluginIdentifier());
Chris@849 230 QString outputId = m_transforms[n].getOutput();
Chris@558 231
Chris@320 232 int binCount = 1;
Chris@320 233 float minValue = 0.0, maxValue = 0.0;
Chris@320 234 bool haveExtents = false;
Chris@876 235 bool haveBinCount = m_descriptors[n]->hasFixedBinCount;
Chris@876 236
Chris@876 237 if (haveBinCount) {
Chris@1039 238 binCount = (int)m_descriptors[n]->binCount;
Chris@320 239 }
Chris@320 240
Chris@876 241 m_needAdditionalModels[n] = false;
Chris@876 242
Chris@843 243 // cerr << "FeatureExtractionModelTransformer: output bin count "
Chris@843 244 // << binCount << endl;
Chris@320 245
Chris@849 246 if (binCount > 0 && m_descriptors[n]->hasKnownExtents) {
Chris@849 247 minValue = m_descriptors[n]->minValue;
Chris@849 248 maxValue = m_descriptors[n]->maxValue;
Chris@320 249 haveExtents = true;
Chris@320 250 }
Chris@320 251
Chris@1040 252 sv_samplerate_t modelRate = input->getSampleRate();
Chris@930 253 int modelResolution = 1;
Chris@712 254
Chris@849 255 if (m_descriptors[n]->sampleType !=
Chris@785 256 Vamp::Plugin::OutputDescriptor::OneSamplePerStep) {
Chris@849 257 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@843 258 cerr << "WARNING: plugin reports output sample rate as "
Chris@849 259 << m_descriptors[n]->sampleRate << " (can't display features with finer resolution than the input rate of " << input->getSampleRate() << ")" << endl;
Chris@785 260 }
Chris@785 261 }
Chris@785 262
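// The model resolution is expressed in frames at the input model's
// sample rate: the transform step size for one-sample-per-step
// outputs, or (input rate / declared output rate), rounded, for
// outputs that declare a rate of their own.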
Chris@849 263 switch (m_descriptors[n]->sampleType) {
Chris@320 264
Chris@320 265 case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
Chris@849 266 if (m_descriptors[n]->sampleRate != 0.0) {
Chris@1040 267 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@320 268 }
Chris@320 269 break;
Chris@320 270
Chris@320 271 case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
Chris@849 272 modelResolution = m_transforms[n].getStepSize();
Chris@320 273 break;
Chris@320 274
Chris@320 275 case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
Chris@451 276 //!!! SV doesn't actually support display of models that have
Chris@451 277 //!!! different underlying rates together -- so we always set
Chris@451 278 //!!! the model rate to be the input model's rate, and adjust
Chris@451 279 //!!! the resolution appropriately. We can't properly display
Chris@451 280 //!!! data with a higher resolution than the base model at all
Chris@849 281 if (m_descriptors[n]->sampleRate > input->getSampleRate()) {
Chris@451 282 modelResolution = 1;
Chris@1071 283 } else if (m_descriptors[n]->sampleRate <= 0.0) {
Chris@1071 284 cerr << "WARNING: Fixed sample-rate plugin reports invalid sample rate " << m_descriptors[n]->sampleRate << "; defaulting to input rate of " << input->getSampleRate() << endl;
Chris@1071 285 modelResolution = 1;
Chris@451 286 } else {
Chris@1040 287 modelResolution = int(round(modelRate / m_descriptors[n]->sampleRate));
Chris@451 288 }
Chris@320 289 break;
Chris@320 290 }
Chris@320 291
Chris@441 292 bool preDurationPlugin = (m_plugin->getVampApiVersion() < 2);
Chris@441 293
Chris@849 294 Model *out = 0;
Chris@849 295
Chris@441 296 if (binCount == 0 &&
Chris@849 297 (preDurationPlugin || !m_descriptors[n]->hasDuration)) {
Chris@320 298
Chris@445 299 // Anything with no value and no duration is an instant
Chris@445 300
Chris@849 301 out = new SparseOneDimensionalModel(modelRate, modelResolution, false);
Chris@558 302 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 303 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 304
Chris@441 305 } else if ((preDurationPlugin && binCount > 1 &&
Chris@849 306 (m_descriptors[n]->sampleType ==
Chris@441 307 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) ||
Chris@849 308 (!preDurationPlugin && m_descriptors[n]->hasDuration)) {
Chris@441 309
Chris@441 310 // For plugins using the old v1 API without explicit duration,
Chris@441 311 // we treat anything that has multiple bins (i.e. that has the
Chris@441 312 // potential to have value and duration) and a variable sample
Chris@441 313 // rate as a note model, taking its values as pitch, duration
Chris@441 314 // and velocity (if present) respectively. This is the same
Chris@441 315 // behaviour as always applied by SV to these plugins in the
Chris@441 316 // past.
Chris@441 317
Chris@441 318 // For plugins with the newer API, we treat anything with
Chris@441 319 // duration as either a note model with pitch and velocity, or
Chris@441 320 // a region model.
Chris@441 321
Chris@441 322 // How do we know whether it's an interval or note model?
Chris@441 323 // What's the essential difference? Is a note model any
Chris@441 324 // interval model using a Hz or "MIDI pitch" scale? There
Chris@441 325 // isn't really a reliable test for "MIDI pitch"... Does a
Chris@441 326 // note model always have velocity? This is a good question
Chris@441 327 // to be addressed by accompanying RDF, but for the moment we
Chris@441 328 // will do the following...
Chris@441 329
Chris@441 330 bool isNoteModel = false;
Chris@441 331
Chris@441 332 // Regions have only value (and duration -- we can't extract a
Chris@441 333 // region model from an old-style plugin that doesn't support
Chris@441 334 // duration)
Chris@441 335 if (binCount > 1) isNoteModel = true;
Chris@441 336
Chris@595 337 // Regions do not have units of Hz or MIDI things (a sweeping
Chris@595 338 // assumption!)
Chris@849 339 if (m_descriptors[n]->unit == "Hz" ||
Chris@849 340 m_descriptors[n]->unit.find("MIDI") != std::string::npos ||
Chris@849 341 m_descriptors[n]->unit.find("midi") != std::string::npos) {
Chris@595 342 isNoteModel = true;
Chris@595 343 }
Chris@441 344
Chris@441 345 // If we had a "sparse 3D model", we would have the additional
Chris@441 346 // problem of determining whether to use that here (if bin
Chris@441 347 // count > 1). But we don't.
Chris@441 348
Chris@859 349 QSettings settings;
Chris@859 350 settings.beginGroup("Transformer");
Chris@859 351 bool flexi = settings.value("use-flexi-note-model", false).toBool();
Chris@859 352 settings.endGroup();
Chris@859 353
Chris@859 354 cerr << "flexi = " << flexi << endl;
Chris@859 355
Chris@859 356 if (isNoteModel && !flexi) {
Chris@441 357
Chris@441 358 NoteModel *model;
Chris@441 359 if (haveExtents) {
Chris@859 360 model = new NoteModel
Chris@859 361 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 362 } else {
Chris@859 363 model = new NoteModel
Chris@859 364 (modelRate, modelResolution, false);
gyorgyf@786 365 }
Chris@849 366 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 367 out = model;
gyorgyf@786 368
Chris@859 369 } else if (isNoteModel && flexi) {
gyorgyf@786 370
gyorgyf@786 371 FlexiNoteModel *model;
gyorgyf@786 372 if (haveExtents) {
Chris@859 373 model = new FlexiNoteModel
Chris@859 374 (modelRate, modelResolution, minValue, maxValue, false);
gyorgyf@786 375 } else {
Chris@859 376 model = new FlexiNoteModel
Chris@859 377 (modelRate, modelResolution, false);
Chris@441 378 }
Chris@849 379 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 380 out = model;
Chris@441 381
Chris@441 382 } else {
Chris@441 383
Chris@441 384 RegionModel *model;
Chris@441 385 if (haveExtents) {
Chris@441 386 model = new RegionModel
Chris@441 387 (modelRate, modelResolution, minValue, maxValue, false);
Chris@441 388 } else {
Chris@441 389 model = new RegionModel
Chris@441 390 (modelRate, modelResolution, false);
Chris@441 391 }
Chris@849 392 model->setScaleUnits(m_descriptors[n]->unit.c_str());
Chris@849 393 out = model;
Chris@441 394 }
Chris@441 395
Chris@558 396 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 397 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 398
Chris@876 399 } else if (binCount == 1 ||
Chris@849 400 (m_descriptors[n]->sampleType ==
Chris@441 401 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) {
Chris@441 402
Chris@441 403 // Anything that is not a 1D, note, or interval model and that
Chris@441 404 // has only one value per result must be a sparse time value
Chris@441 405 // model.
Chris@441 406
Chris@441 407 // Anything that is not a 1D, note, or interval model and that
Chris@876 408 // has a variable sample rate is treated as a set of sparse
Chris@876 409 // time value models, one per output bin, because we lack a
Chris@441 410 // sparse 3D model.
Chris@320 411
Chris@876 412 // Anything that is not a 1D, note, or interval model and that
Chris@876 413 // has a fixed sample rate but an unknown number of values per
Chris@876 414 // result is also treated as a set of sparse time value models.
Chris@876 415
Chris@876 416 // For sets of sparse time value models, we create a single
Chris@876 417 // model first as the "standard" output and then create models
Chris@876 418 // for bins 1+ in the additional model map (mapping the output
Chris@876 419 // descriptor to a list of models indexed by bin-1). But we
Chris@876 420 // don't create the additional models yet, as this case has to
Chris@876 421 // work even if the number of bins is unknown at this point --
Chris@877 422 // we create an additional model (copying its parameters from
Chris@877 423 // the default one) each time a new bin is encountered.
Chris@876 424
Chris@876 425 if (!haveBinCount || binCount > 1) {
Chris@876 426 m_needAdditionalModels[n] = true;
Chris@876 427 }
Chris@876 428
Chris@320 429 SparseTimeValueModel *model;
Chris@320 430 if (haveExtents) {
Chris@320 431 model = new SparseTimeValueModel
Chris@320 432 (modelRate, modelResolution, minValue, maxValue, false);
Chris@320 433 } else {
Chris@320 434 model = new SparseTimeValueModel
Chris@320 435 (modelRate, modelResolution, false);
Chris@320 436 }
Chris@558 437
Chris@558 438 Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();
Chris@849 439 model->setScaleUnits(outputs[m_outputNos[n]].unit.c_str());
Chris@320 440
Chris@849 441 out = model;
Chris@320 442
Chris@558 443 QString outputEventTypeURI = description.getOutputEventTypeURI(outputId);
Chris@849 444 out->setRDFTypeURI(outputEventTypeURI);
Chris@558 445
Chris@441 446 } else {
Chris@320 447
Chris@441 448 // Anything that is not a 1D, note, or interval model and that
Chris@441 449 // has a fixed sample rate and more than one value per result
Chris@441 450 // must be a dense 3D model.
Chris@320 451
Chris@320 452 EditableDenseThreeDimensionalModel *model =
Chris@320 453 new EditableDenseThreeDimensionalModel
Chris@535 454 (modelRate, modelResolution, binCount,
Chris@535 455 EditableDenseThreeDimensionalModel::BasicMultirateCompression,
Chris@535 456 false);
Chris@320 457
Chris@849 458 if (!m_descriptors[n]->binNames.empty()) {
Chris@320 459 std::vector<QString> names;
Chris@930 460 for (int i = 0; i < (int)m_descriptors[n]->binNames.size(); ++i) {
Chris@849 461 names.push_back(m_descriptors[n]->binNames[i].c_str());
Chris@320 462 }
Chris@320 463 model->setBinNames(names);
Chris@320 464 }
Chris@320 465
Chris@849 466 out = model;
Chris@558 467
Chris@558 468 QString outputSignalTypeURI = description.getOutputSignalTypeURI(outputId);
Chris@849 469 out->setRDFTypeURI(outputSignalTypeURI);
Chris@320 470 }
Chris@333 471
Chris@849 472 if (out) {
Chris@849 473 out->setSourceModel(input);
Chris@849 474 m_outputs.push_back(out);
Chris@849 475 }
Chris@320 476 }
Chris@320 477
Chris@331 478 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()
Chris@320 479 {
Chris@690 480 // SVDEBUG << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << endl;
Chris@320 481 delete m_plugin;
Chris@930 482 for (int j = 0; j < (int)m_descriptors.size(); ++j) {
Chris@850 483 delete m_descriptors[j];
Chris@850 484 }
Chris@320 485 }
Chris@320 486
Chris@876 487 FeatureExtractionModelTransformer::Models
Chris@876 488 FeatureExtractionModelTransformer::getAdditionalOutputModels()
Chris@876 489 {
Chris@876 490 Models mm;
Chris@876 491 for (AdditionalModelMap::iterator i = m_additionalModels.begin();
Chris@876 492 i != m_additionalModels.end(); ++i) {
Chris@876 493 for (std::map<int, SparseTimeValueModel *>::iterator j =
Chris@876 494 i->second.begin();
Chris@876 495 j != i->second.end(); ++j) {
Chris@876 496 SparseTimeValueModel *m = j->second;
Chris@876 497 if (m) mm.push_back(m);
Chris@876 498 }
Chris@876 499 }
Chris@876 500 return mm;
Chris@876 501 }
Chris@876 502
Chris@877 503 bool
Chris@877 504 FeatureExtractionModelTransformer::willHaveAdditionalOutputModels()
Chris@877 505 {
Chris@877 506 for (std::map<int, bool>::const_iterator i =
Chris@877 507 m_needAdditionalModels.begin();
Chris@877 508 i != m_needAdditionalModels.end(); ++i) {
Chris@877 509 if (i->second) return true;
Chris@877 510 }
Chris@877 511 return false;
Chris@877 512 }
Chris@877 513
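// Return (creating and caching it if necessary) the additional
// SparseTimeValueModel used for bin binNo (> 0) of output n. The new
// model copies its rate, resolution, extents, units and RDF type from
// the primary model for that output.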
Chris@876 514 SparseTimeValueModel *
Chris@876 515 FeatureExtractionModelTransformer::getAdditionalModel(int n, int binNo)
Chris@876 516 {
Chris@893 517 // std::cerr << "getAdditionalModel(" << n << ", " << binNo << ")" << std::endl;
Chris@876 518
Chris@876 519 if (binNo == 0) {
Chris@876 520 std::cerr << "Internal error: binNo == 0 in getAdditionalModel (should be using primary model)" << std::endl;
Chris@876 521 return 0;
Chris@876 522 }
Chris@876 523
Chris@876 524 if (!m_needAdditionalModels[n]) return 0;
Chris@876 525 if (!isOutput<SparseTimeValueModel>(n)) return 0;
Chris@876 526 if (m_additionalModels[n][binNo]) return m_additionalModels[n][binNo];
Chris@876 527
Chris@876 528 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): creating" << std::endl;
Chris@876 529
Chris@876 530 SparseTimeValueModel *baseModel = getConformingOutput<SparseTimeValueModel>(n);
Chris@876 531 if (!baseModel) return 0;
Chris@876 532
Chris@876 533 std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): (from " << baseModel << ")" << std::endl;
Chris@876 534
Chris@876 535 SparseTimeValueModel *additional =
Chris@876 536 new SparseTimeValueModel(baseModel->getSampleRate(),
Chris@876 537 baseModel->getResolution(),
Chris@876 538 baseModel->getValueMinimum(),
Chris@876 539 baseModel->getValueMaximum(),
Chris@876 540 false);
Chris@876 541
Chris@876 542 additional->setScaleUnits(baseModel->getScaleUnits());
Chris@876 543 additional->setRDFTypeURI(baseModel->getRDFTypeURI());
Chris@876 544
Chris@876 545 m_additionalModels[n][binNo] = additional;
Chris@876 546 return additional;
Chris@876 547 }
Chris@876 548
Chris@320 549 DenseTimeValueModel *
Chris@350 550 FeatureExtractionModelTransformer::getConformingInput()
Chris@320 551 {
Chris@690 552 // SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: input model is " << getInputModel() << endl;
Chris@408 553
Chris@320 554 DenseTimeValueModel *dtvm =
Chris@320 555 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 556 if (!dtvm) {
Chris@690 557 SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << endl;
Chris@320 558 }
Chris@320 559 return dtvm;
Chris@320 560 }
Chris@320 561
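// The main processing loop: wait for the input model to become ready,
// set up per-channel FFT models if the plugin wants frequency-domain
// input, then step through the requested time range block by block,
// passing each block to the plugin and forwarding the returned
// features (and finally getRemainingFeatures()) to the output models
// via addFeature(), with progress reported through setCompletion().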
Chris@320 562 void
Chris@331 563 FeatureExtractionModelTransformer::run()
Chris@320 564 {
Chris@350 565 DenseTimeValueModel *input = getConformingInput();
Chris@320 566 if (!input) return;
Chris@320 567
Chris@849 568 if (m_outputs.empty()) return;
Chris@320 569
Chris@850 570 Transform primaryTransform = m_transforms[0];
Chris@850 571
Chris@497 572 while (!input->isReady() && !m_abandoned) {
Chris@877 573 cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << endl;
Chris@497 574 usleep(500000);
Chris@320 575 }
Chris@497 576 if (m_abandoned) return;
Chris@320 577
Chris@1040 578 sv_samplerate_t sampleRate = input->getSampleRate();
Chris@320 579
Chris@930 580 int channelCount = input->getChannelCount();
Chris@930 581 if ((int)m_plugin->getMaxChannelCount() < channelCount) {
Chris@320 582 channelCount = 1;
Chris@320 583 }
Chris@320 584
Chris@320 585 float **buffers = new float*[channelCount];
Chris@930 586 for (int ch = 0; ch < channelCount; ++ch) {
Chris@850 587 buffers[ch] = new float[primaryTransform.getBlockSize() + 2];
Chris@320 588 }
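// For a frequency-domain plugin each buffer will hold blockSize/2 + 1
// interleaved (real, imaginary) pairs -- hence the blockSize + 2 floats
// allocated above; for a time-domain plugin it holds blockSize samples.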
Chris@320 589
Chris@930 590 int stepSize = primaryTransform.getStepSize();
Chris@930 591 int blockSize = primaryTransform.getBlockSize();
Chris@350 592
Chris@320 593 bool frequencyDomain = (m_plugin->getInputDomain() ==
Chris@320 594 Vamp::Plugin::FrequencyDomain);
Chris@320 595 std::vector<FFTModel *> fftModels;
Chris@320 596
Chris@320 597 if (frequencyDomain) {
Chris@930 598 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 599 FFTModel *model = new FFTModel
Chris@350 600 (getConformingInput(),
Chris@350 601 channelCount == 1 ? m_input.getChannel() : ch,
Chris@850 602 primaryTransform.getWindowType(),
Chris@350 603 blockSize,
Chris@350 604 stepSize,
Chris@350 605 blockSize,
Chris@334 606 false,
Chris@334 607 StorageAdviser::PrecisionCritical);
Chris@320 608 if (!model->isOK()) {
Chris@320 609 delete model;
Chris@850 610 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 611 setCompletion(j, 100);
Chris@850 612 }
Chris@387 613 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either
Chris@387 614 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer");
Chris@320 615 }
Chris@320 616 model->resume();
Chris@320 617 fftModels.push_back(model);
Chris@320 618 }
Chris@320 619 }
Chris@320 620
Chris@1040 621 sv_frame_t startFrame = m_input.getModel()->getStartFrame();
Chris@1040 622 sv_frame_t endFrame = m_input.getModel()->getEndFrame();
Chris@320 623
Chris@850 624 RealTime contextStartRT = primaryTransform.getStartTime();
Chris@850 625 RealTime contextDurationRT = primaryTransform.getDuration();
Chris@350 626
Chris@1040 627 sv_frame_t contextStart =
Chris@350 628 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 629
Chris@1040 630 sv_frame_t contextDuration =
Chris@350 631 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 632
Chris@320 633 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 634 contextStart = startFrame;
Chris@320 635 }
Chris@320 636
Chris@320 637 if (contextDuration == 0) {
Chris@320 638 contextDuration = endFrame - contextStart;
Chris@320 639 }
Chris@320 640 if (contextStart + contextDuration > endFrame) {
Chris@320 641 contextDuration = endFrame - contextStart;
Chris@320 642 }
Chris@320 643
Chris@1039 644 sv_frame_t blockFrame = contextStart;
Chris@320 645
Chris@320 646 long prevCompletion = 0;
Chris@320 647
Chris@850 648 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 649 setCompletion(j, 0);
Chris@850 650 }
Chris@320 651
Chris@556 652 float *reals = 0;
Chris@556 653 float *imaginaries = 0;
Chris@556 654 if (frequencyDomain) {
Chris@556 655 reals = new float[blockSize/2 + 1];
Chris@556 656 imaginaries = new float[blockSize/2 + 1];
Chris@556 657 }
Chris@556 658
Chris@678 659 QString error = "";
Chris@678 660
Chris@320 661 while (!m_abandoned) {
Chris@320 662
Chris@320 663 if (frequencyDomain) {
Chris@350 664 if (blockFrame - int(blockSize)/2 >
Chris@320 665 contextStart + contextDuration) break;
Chris@320 666 } else {
Chris@320 667 if (blockFrame >=
Chris@320 668 contextStart + contextDuration) break;
Chris@320 669 }
Chris@320 670
Chris@690 671 // SVDEBUG << "FeatureExtractionModelTransformer::run: blockFrame "
Chris@320 672 // << blockFrame << ", endFrame " << endFrame << ", blockSize "
Chris@687 673 // << blockSize << endl;
Chris@320 674
Chris@1039 675 int completion = int
Chris@1039 676 ((((blockFrame - contextStart) / stepSize) * 99) /
Chris@1039 677 (contextDuration / stepSize + 1));
Chris@320 678
Chris@350 679 // channelCount is either m_input.getModel()->getChannelCount() or 1
Chris@320 680
Chris@363 681 if (frequencyDomain) {
Chris@930 682 for (int ch = 0; ch < channelCount; ++ch) {
Chris@1039 683 int column = int((blockFrame - startFrame) / stepSize);
Chris@1008 684 if (fftModels[ch]->getValuesAt(column, reals, imaginaries)) {
Chris@1008 685 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 686 buffers[ch][i*2] = reals[i];
Chris@1008 687 buffers[ch][i*2+1] = imaginaries[i];
Chris@1008 688 }
Chris@1008 689 } else {
Chris@1008 690 for (int i = 0; i <= blockSize/2; ++i) {
Chris@1008 691 buffers[ch][i*2] = 0.f;
Chris@1008 692 buffers[ch][i*2+1] = 0.f;
Chris@1008 693 }
Chris@1008 694 }
Chris@678 695 error = fftModels[ch]->getError();
Chris@678 696 if (error != "") {
Chris@843 697 cerr << "FeatureExtractionModelTransformer::run: Abandoning, error is " << error << endl;
Chris@678 698 m_abandoned = true;
Chris@678 699 m_message = error;
Chris@678 700 }
Chris@363 701 }
Chris@363 702 } else {
Chris@363 703 getFrames(channelCount, blockFrame, blockSize, buffers);
Chris@320 704 }
Chris@320 705
Chris@497 706 if (m_abandoned) break;
Chris@497 707
Chris@320 708 Vamp::Plugin::FeatureSet features = m_plugin->process
Chris@1040 709 (buffers, RealTime::frame2RealTime(blockFrame, sampleRate).toVampRealTime());
Chris@320 710
Chris@497 711 if (m_abandoned) break;
Chris@497 712
Chris@850 713 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 714 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 715 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 716 addFeature(j, blockFrame, feature);
Chris@850 717 }
Chris@850 718 }
Chris@320 719
Chris@320 720 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@850 721 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 722 setCompletion(j, completion);
Chris@850 723 }
Chris@320 724 prevCompletion = completion;
Chris@320 725 }
Chris@320 726
Chris@350 727 blockFrame += stepSize;
Chris@320 728 }
Chris@320 729
Chris@497 730 if (!m_abandoned) {
Chris@497 731 Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures();
Chris@320 732
Chris@850 733 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@930 734 for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) {
Chris@850 735 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi];
Chris@850 736 addFeature(j, blockFrame, feature);
Chris@850 737 }
Chris@497 738 }
Chris@497 739 }
Chris@320 740
Chris@850 741 for (int j = 0; j < (int)m_outputNos.size(); ++j) {
Chris@850 742 setCompletion(j, 100);
Chris@850 743 }
Chris@320 744
Chris@320 745 if (frequencyDomain) {
Chris@930 746 for (int ch = 0; ch < channelCount; ++ch) {
Chris@320 747 delete fftModels[ch];
Chris@320 748 }
Chris@556 749 delete[] reals;
Chris@556 750 delete[] imaginaries;
Chris@320 751 }
Chris@974 752
Chris@974 753 for (int ch = 0; ch < channelCount; ++ch) {
Chris@974 754 delete[] buffers[ch];
Chris@974 755 }
Chris@974 756 delete[] buffers;
Chris@320 757 }
Chris@320 758
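// Fill the given buffers with `size' frames of input starting at
// `startFrame'. Frames falling before the start of the input model
// are zero-padded, as is any shortfall at the end; when a single
// channel is requested from a multi-channel model, the channels are
// mixed down to their mean.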
Chris@320 759 void
Chris@363 760 FeatureExtractionModelTransformer::getFrames(int channelCount,
Chris@1039 761 sv_frame_t startFrame,
Chris@1039 762 sv_frame_t size,
Chris@363 763 float **buffers)
Chris@320 764 {
Chris@1039 765 sv_frame_t offset = 0;
Chris@320 766
Chris@320 767 if (startFrame < 0) {
Chris@363 768 for (int c = 0; c < channelCount; ++c) {
Chris@1039 769 for (sv_frame_t i = 0; i < size && startFrame + i < 0; ++i) {
Chris@363 770 buffers[c][i] = 0.0f;
Chris@363 771 }
Chris@320 772 }
Chris@320 773 offset = -startFrame;
Chris@320 774 size -= offset;
Chris@320 775 if (size <= 0) return;
Chris@320 776 startFrame = 0;
Chris@320 777 }
Chris@320 778
Chris@350 779 DenseTimeValueModel *input = getConformingInput();
Chris@350 780 if (!input) return;
Chris@363 781
Chris@1039 782 sv_frame_t got = 0;
Chris@350 783
Chris@363 784 if (channelCount == 1) {
Chris@363 785
Chris@363 786 got = input->getData(m_input.getChannel(), startFrame, size,
Chris@363 787 buffers[0] + offset);
Chris@363 788
Chris@363 789 if (m_input.getChannel() == -1 && input->getChannelCount() > 1) {
Chris@363 790 // use mean instead of sum, as plugin input
Chris@363 791 float cc = float(input->getChannelCount());
Chris@1039 792 for (sv_frame_t i = 0; i < size; ++i) {
Chris@363 793 buffers[0][i + offset] /= cc;
Chris@363 794 }
Chris@363 795 }
Chris@363 796
Chris@363 797 } else {
Chris@363 798
Chris@363 799 float **writebuf = buffers;
Chris@363 800 if (offset > 0) {
Chris@363 801 writebuf = new float *[channelCount];
Chris@363 802 for (int i = 0; i < channelCount; ++i) {
Chris@363 803 writebuf[i] = buffers[i] + offset;
Chris@363 804 }
Chris@363 805 }
Chris@363 806
Chris@363 807 got = input->getData(0, channelCount-1, startFrame, size, writebuf);
Chris@363 808
Chris@363 809 if (writebuf != buffers) delete[] writebuf;
Chris@363 810 }
Chris@320 811
Chris@320 812 while (got < size) {
Chris@363 813 for (int c = 0; c < channelCount; ++c) {
Chris@363 814 buffers[c][got + offset] = 0.0;
Chris@363 815 }
Chris@320 816 ++got;
Chris@320 817 }
Chris@320 818 }
Chris@320 819
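// Convert a single returned feature into a point (or column) in the
// appropriate output model for output n. The feature's frame position
// is taken from its timestamp for variable-rate outputs, from a
// running feature counter for fixed-rate outputs, and from the current
// block frame otherwise.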
Chris@320 820 void
Chris@850 821 FeatureExtractionModelTransformer::addFeature(int n,
Chris@1039 822 sv_frame_t blockFrame,
Chris@850 823 const Vamp::Plugin::Feature &feature)
Chris@320 824 {
Chris@1040 825 sv_samplerate_t inputRate = m_input.getModel()->getSampleRate();
Chris@320 826
Chris@843 827 // cerr << "FeatureExtractionModelTransformer::addFeature: blockFrame = "
Chris@712 828 // << blockFrame << ", hasTimestamp = " << feature.hasTimestamp
Chris@712 829 // << ", timestamp = " << feature.timestamp << ", hasDuration = "
Chris@712 830 // << feature.hasDuration << ", duration = " << feature.duration
Chris@843 831 // << endl;
Chris@320 832
Chris@1039 833 sv_frame_t frame = blockFrame;
Chris@320 834
Chris@849 835 if (m_descriptors[n]->sampleType ==
Chris@320 836 Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320 837
Chris@320 838 if (!feature.hasTimestamp) {
Chris@843 839 cerr
Chris@331 840 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@320 841 << "Feature has variable sample rate but no timestamp!"
Chris@843 842 << endl;
Chris@320 843 return;
Chris@320 844 } else {
Chris@1040 845 frame = RealTime::realTime2Frame(feature.timestamp, inputRate);
Chris@320 846 }
Chris@320 847
Chris@1071 848 // cerr << "variable sample rate: timestamp = " << feature.timestamp
Chris@1071 849 // << " at input rate " << inputRate << " -> " << frame << endl;
Chris@1071 850
Chris@849 851 } else if (m_descriptors[n]->sampleType ==
Chris@320 852 Vamp::Plugin::OutputDescriptor::FixedSampleRate) {
Chris@320 853
Chris@1071 854 sv_samplerate_t rate = m_descriptors[n]->sampleRate;
Chris@1071 855 if (rate <= 0.0) {
Chris@1071 856 rate = inputRate;
Chris@1071 857 }
Chris@1071 858
Chris@779 859 if (!feature.hasTimestamp) {
Chris@849 860 ++m_fixedRateFeatureNos[n];
Chris@779 861 } else {
Chris@779 862 RealTime ts(feature.timestamp.sec, feature.timestamp.nsec);
Chris@1071 863 m_fixedRateFeatureNos[n] = (int)lrint(ts.toDouble() * rate);
Chris@779 864 }
Chris@862 865
Chris@1071 866 // cerr << "m_fixedRateFeatureNo = " << m_fixedRateFeatureNos[n]
Chris@1071 867 // << ", m_descriptor->sampleRate = " << m_descriptors[n]->sampleRate
Chris@862 868 // << ", inputRate = " << inputRate
Chris@862 869 // << " giving frame = ";
Chris@1071 870 frame = lrint((double(m_fixedRateFeatureNos[n]) / rate) * inputRate);
Chris@1071 871 // cerr << frame << endl;
Chris@320 872 }
Chris@862 873
Chris@862 874 if (frame < 0) {
Chris@862 875 cerr
Chris@862 876 << "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@862 877 << "Negative frame counts are not supported (frame = " << frame
Chris@862 878 << " from timestamp " << feature.timestamp
Chris@862 879 << "), dropping feature"
Chris@862 880 << endl;
Chris@862 881 return;
Chris@862 882 }
Chris@862 883
Chris@441 884 // Rather than repeat the complicated tests from the constructor
Chris@441 885 // to determine what sort of model we must be adding the features
Chris@441 886 // to, we instead test what sort of model the constructor decided
Chris@441 887 // to create.
Chris@320 888
Chris@849 889 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@441 890
Chris@441 891 SparseOneDimensionalModel *model =
Chris@849 892 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 893 if (!model) return;
Chris@350 894
Chris@441 895 model->addPoint(SparseOneDimensionalModel::Point
Chris@441 896 (frame, feature.label.c_str()));
Chris@320 897
Chris@849 898 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 899
Chris@350 900 SparseTimeValueModel *model =
Chris@849 901 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 902 if (!model) return;
Chris@350 903
Chris@930 904 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 905
Chris@454 906 float value = feature.values[i];
Chris@454 907
Chris@454 908 QString label = feature.label.c_str();
Chris@454 909 if (feature.values.size() > 1) {
Chris@454 910 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 911 }
Chris@454 912
Chris@876 913 SparseTimeValueModel *targetModel = model;
Chris@876 914
Chris@876 915 if (m_needAdditionalModels[n] && i > 0) {
Chris@876 916 targetModel = getAdditionalModel(n, i);
Chris@876 917 if (!targetModel) targetModel = model;
Chris@893 918 // std::cerr << "adding point to model " << targetModel
Chris@893 919 // << " for output " << n << " bin " << i << std::endl;
Chris@876 920 }
Chris@876 921
Chris@876 922 targetModel->addPoint
Chris@876 923 (SparseTimeValueModel::Point(frame, value, label));
Chris@454 924 }
Chris@320 925
Chris@849 926 } else if (isOutput<FlexiNoteModel>(n) || isOutput<NoteModel>(n) || isOutput<RegionModel>(n)) { //GF: Added Note Model
Chris@320 927
Chris@441 928 int index = 0;
Chris@441 929
Chris@441 930 float value = 0.0;
Chris@930 931 if ((int)feature.values.size() > index) {
Chris@441 932 value = feature.values[index++];
Chris@441 933 }
Chris@320 934
Chris@1039 935 sv_frame_t duration = 1;
Chris@441 936 if (feature.hasDuration) {
Chris@1040 937 duration = RealTime::realTime2Frame(feature.duration, inputRate);
Chris@441 938 } else {
Chris@1039 939 if (in_range_for(feature.values, index)) {
Chris@1039 940 duration = lrintf(feature.values[index++]);
Chris@441 941 }
Chris@441 942 }
gyorgyf@786 943
Chris@891 944 if (isOutput<FlexiNoteModel>(n)) { // GF: added for flexi note model
gyorgyf@786 945
gyorgyf@786 946 float velocity = 100;
Chris@930 947 if ((int)feature.values.size() > index) {
gyorgyf@786 948 velocity = feature.values[index++];
gyorgyf@786 949 }
gyorgyf@786 950 if (velocity < 0) velocity = 127;
gyorgyf@786 951 if (velocity > 127) velocity = 127;
gyorgyf@786 952
Chris@849 953 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 954 if (!model) return;
Chris@1039 955 model->addPoint(FlexiNoteModel::Point(frame,
Chris@1039 956 value, // value is pitch
Chris@1039 957 duration,
Chris@1039 958 velocity / 127.f,
Chris@1039 959 feature.label.c_str()));
gyorgyf@786 960 // GF: end -- added for flexi note model
Chris@849 961 } else if (isOutput<NoteModel>(n)) {
Chris@320 962
Chris@441 963 float velocity = 100;
Chris@930 964 if ((int)feature.values.size() > index) {
Chris@441 965 velocity = feature.values[index++];
Chris@441 966 }
Chris@441 967 if (velocity < 0) velocity = 127;
Chris@441 968 if (velocity > 127) velocity = 127;
Chris@320 969
Chris@849 970 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@441 971 if (!model) return;
Chris@441 972 model->addPoint(NoteModel::Point(frame, value, // value is pitch
Chris@1039 973 duration,
Chris@441 974 velocity / 127.f,
Chris@441 975 feature.label.c_str()));
Chris@441 976 } else {
gyorgyf@786 977
Chris@849 978 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@454 979 if (!model) return;
Chris@454 980
Chris@474 981 if (feature.hasDuration && !feature.values.empty()) {
Chris@454 982
Chris@930 983 for (int i = 0; i < (int)feature.values.size(); ++i) {
Chris@454 984
Chris@454 985 float value = feature.values[i];
Chris@454 986
Chris@454 987 QString label = feature.label.c_str();
Chris@454 988 if (feature.values.size() > 1) {
Chris@454 989 label = QString("[%1] %2").arg(i+1).arg(label);
Chris@454 990 }
Chris@454 991
Chris@1039 992 model->addPoint(RegionModel::Point(frame,
Chris@1039 993 value,
Chris@1039 994 duration,
Chris@454 995 label));
Chris@454 996 }
Chris@454 997 } else {
Chris@454 998
Chris@1039 999 model->addPoint(RegionModel::Point(frame,
Chris@1039 1000 value,
Chris@1039 1001 duration,
Chris@441 1002 feature.label.c_str()));
Chris@454 1003 }
Chris@441 1004 }
Chris@320 1005
Chris@849 1006 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1007
Chris@533 1008 DenseThreeDimensionalModel::Column values =
Chris@533 1009 DenseThreeDimensionalModel::Column::fromStdVector(feature.values);
Chris@320 1010
Chris@320 1011 EditableDenseThreeDimensionalModel *model =
Chris@849 1012 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1013 if (!model) return;
Chris@320 1014
Chris@889 1015 // cerr << "(note: model resolution = " << model->getResolution() << ")"
Chris@889 1016 // << endl;
Chris@889 1017
Chris@891 1018 if (!feature.hasTimestamp && m_fixedRateFeatureNos[n] >= 0) {
Chris@891 1019 model->setColumn(m_fixedRateFeatureNos[n], values);
Chris@889 1020 } else {
Chris@1039 1021 model->setColumn(int(frame / model->getResolution()), values);
Chris@889 1022 }
Chris@441 1023
Chris@441 1024 } else {
Chris@690 1025 SVDEBUG << "FeatureExtractionModelTransformer::addFeature: Unknown output model type!" << endl;
Chris@320 1026 }
Chris@320 1027 }
Chris@320 1028
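// Forward a completion percentage to the output model for output n,
// abandoning the whole transform if that model reports that it is
// itself being abandoned.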
Chris@320 1029 void
Chris@850 1030 FeatureExtractionModelTransformer::setCompletion(int n, int completion)
Chris@320 1031 {
Chris@690 1032 // SVDEBUG << "FeatureExtractionModelTransformer::setCompletion("
Chris@687 1033 // << completion << ")" << endl;
Chris@320 1034
Chris@849 1035 if (isOutput<SparseOneDimensionalModel>(n)) {
Chris@320 1036
Chris@350 1037 SparseOneDimensionalModel *model =
Chris@849 1038 getConformingOutput<SparseOneDimensionalModel>(n);
Chris@320 1039 if (!model) return;
Chris@923 1040 if (model->isAbandoning()) abandon();
Chris@441 1041 model->setCompletion(completion, true);
Chris@320 1042
Chris@849 1043 } else if (isOutput<SparseTimeValueModel>(n)) {
Chris@320 1044
Chris@350 1045 SparseTimeValueModel *model =
Chris@849 1046 getConformingOutput<SparseTimeValueModel>(n);
Chris@320 1047 if (!model) return;
Chris@923 1048 if (model->isAbandoning()) abandon();
Chris@441 1049 model->setCompletion(completion, true);
Chris@320 1050
Chris@849 1051 } else if (isOutput<NoteModel>(n)) {
Chris@320 1052
Chris@849 1053 NoteModel *model = getConformingOutput<NoteModel>(n);
Chris@320 1054 if (!model) return;
Chris@923 1055 if (model->isAbandoning()) abandon();
Chris@441 1056 model->setCompletion(completion, true);
gyorgyf@786 1057
Chris@923 1058 } else if (isOutput<FlexiNoteModel>(n)) {
gyorgyf@786 1059
Chris@849 1060 FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n);
gyorgyf@786 1061 if (!model) return;
Chris@923 1062 if (model->isAbandoning()) abandon();
gyorgyf@786 1063 model->setCompletion(completion, true);
Chris@320 1064
Chris@849 1065 } else if (isOutput<RegionModel>(n)) {
Chris@441 1066
Chris@849 1067 RegionModel *model = getConformingOutput<RegionModel>(n);
Chris@441 1068 if (!model) return;
Chris@923 1069 if (model->isAbandoning()) abandon();
Chris@441 1070 model->setCompletion(completion, true);
Chris@441 1071
Chris@849 1072 } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) {
Chris@320 1073
Chris@320 1074 EditableDenseThreeDimensionalModel *model =
Chris@849 1075 getConformingOutput<EditableDenseThreeDimensionalModel>(n);
Chris@320 1076 if (!model) return;
Chris@923 1077 if (model->isAbandoning()) abandon();
Chris@350 1078 model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320 1079 }
Chris@320 1080 }
Chris@320 1081