| Chris@320 | 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */ | 
| Chris@320 | 2 | 
| Chris@320 | 3 /* | 
| Chris@320 | 4     Sonic Visualiser | 
| Chris@320 | 5     An audio file viewer and annotation editor. | 
| Chris@320 | 6     Centre for Digital Music, Queen Mary, University of London. | 
| Chris@320 | 7     This file copyright 2006 Chris Cannam and QMUL. | 
| Chris@320 | 8 | 
| Chris@320 | 9     This program is free software; you can redistribute it and/or | 
| Chris@320 | 10     modify it under the terms of the GNU General Public License as | 
| Chris@320 | 11     published by the Free Software Foundation; either version 2 of the | 
| Chris@320 | 12     License, or (at your option) any later version.  See the file | 
| Chris@320 | 13     COPYING included with this distribution for more information. | 
| Chris@320 | 14 */ | 
| Chris@320 | 15 | 
| Chris@331 | 16 #include "FeatureExtractionModelTransformer.h" | 
| Chris@320 | 17 | 
| Chris@320 | 18 #include "plugin/FeatureExtractionPluginFactory.h" | 
| Chris@1225 | 19 | 
| Chris@320 | 20 #include "plugin/PluginXml.h" | 
| Chris@475 | 21 #include <vamp-hostsdk/Plugin.h> | 
| Chris@320 | 22 | 
| Chris@320 | 23 #include "data/model/Model.h" | 
| Chris@320 | 24 #include "base/Window.h" | 
| Chris@387 | 25 #include "base/Exceptions.h" | 
| Chris@320 | 26 #include "data/model/SparseOneDimensionalModel.h" | 
| Chris@320 | 27 #include "data/model/SparseTimeValueModel.h" | 
| Chris@320 | 28 #include "data/model/EditableDenseThreeDimensionalModel.h" | 
| Chris@320 | 29 #include "data/model/DenseTimeValueModel.h" | 
| Chris@320 | 30 #include "data/model/NoteModel.h" | 
| gyorgyf@786 | 31 #include "data/model/FlexiNoteModel.h" | 
| Chris@441 | 32 #include "data/model/RegionModel.h" | 
| Chris@320 | 33 #include "data/model/FFTModel.h" | 
| Chris@320 | 34 #include "data/model/WaveFileModel.h" | 
| Chris@558 | 35 #include "rdf/PluginRDFDescription.h" | 
| Chris@320 | 36 | 
| Chris@350 | 37 #include "TransformFactory.h" | 
| Chris@350 | 38 | 
| Chris@320 | 39 #include <iostream> | 
| Chris@320 | 40 | 
| Chris@859 | 41 #include <QSettings> | 
| Chris@859 | 42 | 
| Chris@350 | 43 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in, | 
| Chris@859 | 44                                                                      const Transform &transform) : | 
| Chris@350 | 45     ModelTransformer(in, transform), | 
| Chris@1211 | 46     m_plugin(0), | 
| Chris@1211 | 47     m_haveOutputs(false) | 
| Chris@320 | 48 { | 
| Chris@1080 | 49     SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl; | 
| Chris@849 | 50 } | 
| Chris@849 | 51 | 
| Chris@849 | 52 FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in, | 
| Chris@859 | 53                                                                      const Transforms &transforms) : | 
| Chris@849 | 54     ModelTransformer(in, transforms), | 
| Chris@1211 | 55     m_plugin(0), | 
| Chris@1211 | 56     m_haveOutputs(false) | 
| Chris@849 | 57 { | 
| Chris@1080 | 58     if (m_transforms.empty()) { | 
| Chris@1080 | 59         SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s)" << endl; | 
| Chris@1080 | 60     } else { | 
| Chris@1080 | 61         SVDEBUG << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: " << transforms.size() << " transform(s), first has plugin " << m_transforms.begin()->getPluginIdentifier() << ", outputName " << m_transforms.begin()->getOutput() << endl; | 
| Chris@1080 | 62     } | 
| Chris@849 | 63 } | 
| Chris@849 | 64 | 
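// Helper: two transforms count as "similar" if they are identical in
// every respect apart from their choice of plugin output -- the
// precondition that initialise() enforces for transforms sharing a
// single FeatureExtractionModelTransformer (and so one plugin instance).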
| Chris@849 | 65 static bool | 
| Chris@849 | 66 areTransformsSimilar(const Transform &t1, const Transform &t2) | 
| Chris@849 | 67 { | 
| Chris@849 | 68     Transform t2o(t2); | 
| Chris@849 | 69     t2o.setOutput(t1.getOutput()); | 
| Chris@849 | 70     return t1 == t2o; | 
| Chris@849 | 71 } | 
| Chris@849 | 72 | 
| Chris@849 | 73 bool | 
| Chris@849 | 74 FeatureExtractionModelTransformer::initialise() | 
| Chris@849 | 75 { | 
| Chris@1237 | 76     // This is (now) called from the run thread. The plugin is | 
| Chris@1237 | 77     // constructed, initialised, used, and destroyed all from a single | 
| Chris@1237 | 78     // thread. | 
| Chris@1237 | 79 | 
| Chris@849 | 80     // All transforms must use the same plugin, parameters, and | 
| Chris@849 | 81     // inputs: they can differ only in choice of plugin output. So we | 
| Chris@849 | 82     // initialise based purely on the first transform in the list (but | 
| Chris@849 | 83     // first check that they are actually similar as promised) | 
| Chris@849 | 84 | 
| Chris@849 | 85     for (int j = 1; j < (int)m_transforms.size(); ++j) { | 
| Chris@849 | 86         if (!areTransformsSimilar(m_transforms[0], m_transforms[j])) { | 
| Chris@849 | 87             m_message = tr("Transforms supplied to a single FeatureExtractionModelTransformer instance must be similar in every respect except plugin output"); | 
| Chris@1368 | 88             SVCERR << m_message << endl; | 
| Chris@849 | 89             return false; | 
| Chris@849 | 90         } | 
| Chris@849 | 91     } | 
| Chris@849 | 92 | 
| Chris@849 | 93     Transform primaryTransform = m_transforms[0]; | 
| Chris@849 | 94 | 
| Chris@849 | 95     QString pluginId = primaryTransform.getPluginIdentifier(); | 
| Chris@320 | 96 | 
| Chris@1226 | 97     FeatureExtractionPluginFactory *factory = | 
| Chris@1226 | 98         FeatureExtractionPluginFactory::instance(); | 
| Chris@320 | 99 | 
| Chris@320 | 100     if (!factory) { | 
| Chris@361 | 101         m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId); | 
| Chris@1368 | 102         SVCERR << m_message << endl; | 
| Chris@849 | 103 	return false; | 
| Chris@320 | 104     } | 
| Chris@320 | 105 | 
| Chris@350 | 106     DenseTimeValueModel *input = getConformingInput(); | 
| Chris@350 | 107     if (!input) { | 
| Chris@361 | 108         m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId); | 
| Chris@1368 | 109         SVCERR << m_message << endl; | 
| Chris@849 | 110         return false; | 
| Chris@350 | 111     } | 
| Chris@320 | 112 | 
| Chris@1264 | 113     SVDEBUG << "FeatureExtractionModelTransformer: Instantiating plugin for transform in thread " | 
| Chris@1264 | 114             << QThread::currentThreadId() << endl; | 
| Chris@1211 | 115 | 
| Chris@1040 | 116     m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate()); | 
| Chris@320 | 117     if (!m_plugin) { | 
| Chris@361 | 118         m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId); | 
| Chris@1368 | 119         SVCERR << m_message << endl; | 
| Chris@849 | 120 	return false; | 
| Chris@320 | 121     } | 
| Chris@320 | 122 | 
| Chris@350 | 123     TransformFactory::getInstance()->makeContextConsistentWithPlugin | 
| Chris@849 | 124         (primaryTransform, m_plugin); | 
| Chris@1368 | 125 | 
| Chris@350 | 126     TransformFactory::getInstance()->setPluginParameters | 
| Chris@849 | 127         (primaryTransform, m_plugin); | 
| Chris@1368 | 128 | 
| Chris@930 | 129     int channelCount = input->getChannelCount(); | 
| Chris@930 | 130     if ((int)m_plugin->getMaxChannelCount() < channelCount) { | 
| Chris@320 | 131 	channelCount = 1; | 
| Chris@320 | 132     } | 
| Chris@930 | 133     if ((int)m_plugin->getMinChannelCount() > channelCount) { | 
| Chris@361 | 134         m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)") | 
| Chris@361 | 135             .arg(pluginId) | 
| Chris@361 | 136             .arg(m_plugin->getMinChannelCount()) | 
| Chris@361 | 137             .arg(m_plugin->getMaxChannelCount()) | 
| Chris@361 | 138             .arg(input->getChannelCount()); | 
| Chris@1368 | 139         SVCERR << m_message << endl; | 
| Chris@849 | 140 	return false; | 
| Chris@320 | 141     } | 
| Chris@1368 | 142 | 
| Chris@690 | 143     SVDEBUG << "Initialising feature extraction plugin with channels = " | 
| Chris@1264 | 144             << channelCount << ", step = " << primaryTransform.getStepSize() | 
| Chris@1264 | 145             << ", block = " << primaryTransform.getBlockSize() << endl; | 
| Chris@320 | 146 | 
| Chris@320 | 147     if (!m_plugin->initialise(channelCount, | 
| Chris@849 | 148                               primaryTransform.getStepSize(), | 
| Chris@849 | 149                               primaryTransform.getBlockSize())) { | 
| Chris@1264 | 150 | 
| Chris@930 | 151         int pstep = primaryTransform.getStepSize(); | 
| Chris@930 | 152         int pblock = primaryTransform.getBlockSize(); | 
| Chris@361 | 153 | 
| Chris@850 | 154 ///!!! hang on, this isn't right -- we're modifying a copy | 
| Chris@849 | 155         primaryTransform.setStepSize(0); | 
| Chris@849 | 156         primaryTransform.setBlockSize(0); | 
| Chris@361 | 157         TransformFactory::getInstance()->makeContextConsistentWithPlugin | 
| Chris@849 | 158             (primaryTransform, m_plugin); | 
| Chris@361 | 159 | 
| Chris@849 | 160         if (primaryTransform.getStepSize() != pstep || | 
| Chris@849 | 161             primaryTransform.getBlockSize() != pblock) { | 
| Chris@1264 | 162 | 
| Chris@1264 | 163             SVDEBUG << "Initialisation failed, trying again with default step = " | 
| Chris@1264 | 164                     << primaryTransform.getStepSize() | 
| Chris@1264 | 165                     << ", block = " << primaryTransform.getBlockSize() << endl; | 
| Chris@361 | 166 | 
| Chris@361 | 167             if (!m_plugin->initialise(channelCount, | 
| Chris@849 | 168                                       primaryTransform.getStepSize(), | 
| Chris@849 | 169                                       primaryTransform.getBlockSize())) { | 
| Chris@361 | 170 | 
| Chris@1264 | 171                 SVDEBUG << "Initialisation failed again" << endl; | 
| Chris@1264 | 172 | 
| Chris@361 | 173                 m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId); | 
| Chris@1368 | 174                 SVCERR << m_message << endl; | 
| Chris@849 | 175                 return false; | 
| Chris@361 | 176 | 
| Chris@361 | 177             } else { | 
| Chris@1264 | 178 | 
| Chris@1264 | 179                 SVDEBUG << "Initialisation succeeded this time" << endl; | 
| Chris@1264 | 180 | 
| Chris@361 | 181                 m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead") | 
| Chris@361 | 182                     .arg(pluginId) | 
| Chris@361 | 183                     .arg(pstep) | 
| Chris@361 | 184                     .arg(pblock) | 
| Chris@849 | 185                     .arg(primaryTransform.getStepSize()) | 
| Chris@849 | 186                     .arg(primaryTransform.getBlockSize()); | 
| Chris@1368 | 187                 SVCERR << m_message << endl; | 
| Chris@361 | 188             } | 
| Chris@361 | 189 | 
| Chris@361 | 190         } else { | 
| Chris@361 | 191 | 
| Chris@1264 | 192             SVDEBUG << "Initialisation failed" << endl; | 
| Chris@1264 | 193 | 
| Chris@361 | 194             m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId); | 
| Chris@1368 | 195             SVCERR << m_message << endl; | 
| Chris@849 | 196             return false; | 
| Chris@361 | 197         } | 
| Chris@1264 | 198     } else { | 
| Chris@1264 | 199         SVDEBUG << "Initialisation succeeded" << endl; | 
| Chris@320 | 200     } | 
| Chris@320 | 201 | 
| Chris@849 | 202     if (primaryTransform.getPluginVersion() != "") { | 
| Chris@366 | 203         QString pv = QString("%1").arg(m_plugin->getPluginVersion()); | 
| Chris@849 | 204         if (pv != primaryTransform.getPluginVersion()) { | 
| Chris@366 | 205             QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3") | 
| Chris@849 | 206                 .arg(primaryTransform.getPluginVersion()) | 
| Chris@366 | 207                 .arg(pluginId) | 
| Chris@366 | 208                 .arg(pv); | 
| Chris@366 | 209             if (m_message != "") { | 
| Chris@366 | 210                 m_message = QString("%1; %2").arg(vm).arg(m_message); | 
| Chris@366 | 211             } else { | 
| Chris@366 | 212                 m_message = vm; | 
| Chris@366 | 213             } | 
| Chris@1368 | 214             SVCERR << m_message << endl; | 
| Chris@366 | 215         } | 
| Chris@366 | 216     } | 
| Chris@366 | 217 | 
| Chris@320 | 218     Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors(); | 
| Chris@320 | 219 | 
| Chris@320 | 220     if (outputs.empty()) { | 
| Chris@361 | 221         m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId); | 
| Chris@1368 | 222         SVCERR << m_message << endl; | 
| Chris@849 | 223 	return false; | 
| Chris@320 | 224     } | 
| Chris@320 | 225 | 
| Chris@849 | 226     for (int j = 0; j < (int)m_transforms.size(); ++j) { | 
| Chris@849 | 227 | 
| Chris@849 | 228         for (int i = 0; i < (int)outputs.size(); ++i) { | 
| Chris@849 | 229 //        SVDEBUG << "comparing output " << i << " name \"" << outputs[i].identifier << "\" with expected \"" << m_transforms[j].getOutput() << "\"" << endl; | 
| Chris@849 | 230             if (m_transforms[j].getOutput() == "" || | 
| Chris@849 | 231                 outputs[i].identifier == m_transforms[j].getOutput().toStdString()) { | 
| Chris@849 | 232                 m_outputNos.push_back(i); | 
| Chris@849 | 233                 m_descriptors.push_back(new Vamp::Plugin::OutputDescriptor(outputs[i])); | 
| Chris@849 | 234                 m_fixedRateFeatureNos.push_back(-1); // we increment before use | 
| Chris@849 | 235                 break; | 
| Chris@849 | 236             } | 
| Chris@849 | 237         } | 
| Chris@849 | 238 | 
| Chris@930 | 239         if ((int)m_descriptors.size() <= j) { | 
| Chris@849 | 240             m_message = tr("Plugin \"%1\" has no output named \"%2\"") | 
| Chris@849 | 241                 .arg(pluginId) | 
| Chris@849 | 242                 .arg(m_transforms[j].getOutput()); | 
| Chris@1368 | 243             SVCERR << m_message << endl; | 
| Chris@849 | 244             return false; | 
| Chris@849 | 245         } | 
| Chris@320 | 246     } | 
| Chris@320 | 247 | 
| Chris@849 | 248     for (int j = 0; j < (int)m_transforms.size(); ++j) { | 
| Chris@876 | 249         createOutputModels(j); | 
| Chris@849 | 250     } | 
| Chris@849 | 251 | 
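    // Let any thread blocked in awaitOutputModels() know that the
    // output models have now been created.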
| Chris@1211 | 252     m_outputMutex.lock(); | 
| Chris@1211 | 253     m_haveOutputs = true; | 
| Chris@1211 | 254     m_outputsCondition.wakeAll(); | 
| Chris@1211 | 255     m_outputMutex.unlock(); | 
| Chris@1211 | 256 | 
| Chris@849 | 257     return true; | 
| Chris@558 | 258 } | 
| Chris@558 | 259 | 
| Chris@558 | 260 void | 
| Chris@1237 | 261 FeatureExtractionModelTransformer::deinitialise() | 
| Chris@1237 | 262 { | 
| Chris@1264 | 263     SVDEBUG << "FeatureExtractionModelTransformer: deleting plugin for transform in thread " | 
| Chris@1264 | 264             << QThread::currentThreadId() << endl; | 
| Chris@1237 | 265 | 
| Chris@1237 | 266     delete m_plugin; | 
| Chris@1237 | 267     for (int j = 0; j < (int)m_descriptors.size(); ++j) { | 
| Chris@1237 | 268         delete m_descriptors[j]; | 
| Chris@1237 | 269     } | 
| Chris@1237 | 270 } | 
| Chris@1237 | 271 | 
| Chris@1237 | 272 void | 
| Chris@876 | 273 FeatureExtractionModelTransformer::createOutputModels(int n) | 
| Chris@558 | 274 { | 
| Chris@558 | 275     DenseTimeValueModel *input = getConformingInput(); | 
| Chris@712 | 276 | 
| Chris@849 | 277     PluginRDFDescription description(m_transforms[n].getPluginIdentifier()); | 
| Chris@849 | 278     QString outputId = m_transforms[n].getOutput(); | 
| Chris@558 | 279 | 
| Chris@320 | 280     int binCount = 1; | 
| Chris@320 | 281     float minValue = 0.0, maxValue = 0.0; | 
| Chris@320 | 282     bool haveExtents = false; | 
| Chris@876 | 283     bool haveBinCount = m_descriptors[n]->hasFixedBinCount; | 
| Chris@876 | 284 | 
| Chris@876 | 285     if (haveBinCount) { | 
| Chris@1039 | 286 	binCount = (int)m_descriptors[n]->binCount; | 
| Chris@320 | 287     } | 
| Chris@320 | 288 | 
| Chris@876 | 289     m_needAdditionalModels[n] = false; | 
| Chris@876 | 290 | 
| Chris@843 | 291 //    cerr << "FeatureExtractionModelTransformer: output bin count " | 
| Chris@843 | 292 //	      << binCount << endl; | 
| Chris@320 | 293 | 
| Chris@849 | 294     if (binCount > 0 && m_descriptors[n]->hasKnownExtents) { | 
| Chris@849 | 295 	minValue = m_descriptors[n]->minValue; | 
| Chris@849 | 296 	maxValue = m_descriptors[n]->maxValue; | 
| Chris@320 | 297         haveExtents = true; | 
| Chris@320 | 298     } | 
| Chris@320 | 299 | 
| Chris@1040 | 300     sv_samplerate_t modelRate = input->getSampleRate(); | 
| Chris@1254 | 301     sv_samplerate_t outputRate = modelRate; | 
| Chris@930 | 302     int modelResolution = 1; | 
| Chris@712 | 303 | 
| Chris@849 | 304     if (m_descriptors[n]->sampleType != | 
| Chris@785 | 305         Vamp::Plugin::OutputDescriptor::OneSamplePerStep) { | 
| Chris@1254 | 306 | 
| Chris@1254 | 307         outputRate = m_descriptors[n]->sampleRate; | 
| Chris@1254 | 308 | 
| Chris@1254 | 309         //!!! SV doesn't actually support display of models that have | 
| Chris@1254 | 310         //!!! different underlying rates together -- so we always set | 
| Chris@1254 | 311         //!!! the model rate to be the input model's rate, and adjust | 
| Chris@1254 | 312         //!!! the resolution appropriately.  We can't properly display | 
| Chris@1254 | 313         //!!! data with a higher resolution than the base model at all | 
| Chris@1254 | 314         if (outputRate > input->getSampleRate()) { | 
| Chris@1264 | 315             SVDEBUG << "WARNING: plugin reports output sample rate as " | 
| Chris@1264 | 316                     << outputRate | 
| Chris@1264 | 317                     << " (can't display features with finer resolution than the input rate of " | 
| Chris@1264 | 318                     << modelRate << ")" << endl; | 
| Chris@1254 | 319             outputRate = modelRate; | 
| Chris@785 | 320         } | 
| Chris@785 | 321     } | 
| Chris@785 | 322 | 
| Chris@849 | 323     switch (m_descriptors[n]->sampleType) { | 
| Chris@320 | 324 | 
| Chris@320 | 325     case Vamp::Plugin::OutputDescriptor::VariableSampleRate: | 
| Chris@1254 | 326 	if (outputRate != 0.0) { | 
| Chris@1254 | 327 	    modelResolution = int(round(modelRate / outputRate)); | 
| Chris@320 | 328 	} | 
| Chris@320 | 329 	break; | 
| Chris@320 | 330 | 
| Chris@320 | 331     case Vamp::Plugin::OutputDescriptor::OneSamplePerStep: | 
| Chris@849 | 332 	modelResolution = m_transforms[n].getStepSize(); | 
| Chris@320 | 333 	break; | 
| Chris@320 | 334 | 
| Chris@320 | 335     case Vamp::Plugin::OutputDescriptor::FixedSampleRate: | 
| Chris@1254 | 336         if (outputRate <= 0.0) { | 
| Chris@1264 | 337             SVDEBUG << "WARNING: Fixed sample-rate plugin reports invalid sample rate " << m_descriptors[n]->sampleRate << "; defaulting to input rate of " << input->getSampleRate() << endl; | 
| Chris@1071 | 338             modelResolution = 1; | 
| Chris@451 | 339         } else { | 
| Chris@1254 | 340             modelResolution = int(round(modelRate / outputRate)); | 
| Chris@1254 | 341 //            cerr << "modelRate = " << modelRate << ", descriptor rate = " << outputRate << ", modelResolution = " << modelResolution << endl; | 
| Chris@451 | 342         } | 
| Chris@320 | 343 	break; | 
| Chris@320 | 344     } | 
| Chris@320 | 345 | 
| Chris@441 | 346     bool preDurationPlugin = (m_plugin->getVampApiVersion() < 2); | 
| Chris@441 | 347 | 
| Chris@849 | 348     Model *out = 0; | 
| Chris@849 | 349 | 
| Chris@441 | 350     if (binCount == 0 && | 
| Chris@849 | 351         (preDurationPlugin || !m_descriptors[n]->hasDuration)) { | 
| Chris@320 | 352 | 
| Chris@445 | 353         // Anything with no value and no duration is an instant | 
| Chris@445 | 354 | 
| Chris@849 | 355         out = new SparseOneDimensionalModel(modelRate, modelResolution, false); | 
| Chris@558 | 356         QString outputEventTypeURI = description.getOutputEventTypeURI(outputId); | 
| Chris@849 | 357         out->setRDFTypeURI(outputEventTypeURI); | 
| Chris@558 | 358 | 
| Chris@441 | 359     } else if ((preDurationPlugin && binCount > 1 && | 
| Chris@849 | 360                 (m_descriptors[n]->sampleType == | 
| Chris@441 | 361                  Vamp::Plugin::OutputDescriptor::VariableSampleRate)) || | 
| Chris@849 | 362                (!preDurationPlugin && m_descriptors[n]->hasDuration)) { | 
| Chris@441 | 363 | 
| Chris@441 | 364         // For plugins using the old v1 API without explicit duration, | 
| Chris@441 | 365         // we treat anything that has multiple bins (i.e. that has the | 
| Chris@441 | 366         // potential to have value and duration) and a variable sample | 
| Chris@441 | 367         // rate as a note model, taking its values as pitch, duration | 
| Chris@441 | 368         // and velocity (if present) respectively.  This is the same | 
| Chris@441 | 369         // behaviour as always applied by SV to these plugins in the | 
| Chris@441 | 370         // past. | 
| Chris@441 | 371 | 
| Chris@441 | 372         // For plugins with the newer API, we treat anything with | 
| Chris@441 | 373         // duration as either a note model with pitch and velocity, or | 
| Chris@441 | 374         // a region model. | 
| Chris@441 | 375 | 
| Chris@441 | 376         // How do we know whether it's an interval or note model? | 
| Chris@441 | 377         // What's the essential difference?  Is a note model any | 
| Chris@441 | 378         // interval model using a Hz or "MIDI pitch" scale?  There | 
| Chris@441 | 379         // isn't really a reliable test for "MIDI pitch"...  Does a | 
| Chris@441 | 380         // note model always have velocity?  This is a good question | 
| Chris@441 | 381         // to be addressed by accompanying RDF, but for the moment we | 
| Chris@441 | 382         // will do the following... | 
| Chris@441 | 383 | 
| Chris@441 | 384         bool isNoteModel = false; | 
| Chris@441 | 385 | 
| Chris@441 | 386         // Regions have only value (and duration -- we can't extract a | 
| Chris@441 | 387         // region model from an old-style plugin that doesn't support | 
| Chris@441 | 388         // duration) | 
| Chris@441 | 389         if (binCount > 1) isNoteModel = true; | 
| Chris@441 | 390 | 
| Chris@595 | 391         // Regions do not have units of Hz or MIDI things (a sweeping | 
| Chris@595 | 392         // assumption!) | 
| Chris@849 | 393         if (m_descriptors[n]->unit == "Hz" || | 
| Chris@849 | 394             m_descriptors[n]->unit.find("MIDI") != std::string::npos || | 
| Chris@849 | 395             m_descriptors[n]->unit.find("midi") != std::string::npos) { | 
| Chris@595 | 396             isNoteModel = true; | 
| Chris@595 | 397         } | 
| Chris@441 | 398 | 
| Chris@441 | 399         // If we had a "sparse 3D model", we would have the additional | 
| Chris@441 | 400         // problem of determining whether to use that here (if bin | 
| Chris@441 | 401         // count > 1).  But we don't. | 
| Chris@441 | 402 | 
| Chris@859 | 403         QSettings settings; | 
| Chris@859 | 404         settings.beginGroup("Transformer"); | 
| Chris@859 | 405         bool flexi = settings.value("use-flexi-note-model", false).toBool(); | 
| Chris@859 | 406         settings.endGroup(); | 
| Chris@859 | 407 | 
| Chris@859 | 408         cerr << "flexi = " << flexi << endl; | 
| Chris@859 | 409 | 
| Chris@859 | 410         if (isNoteModel && !flexi) { | 
| Chris@441 | 411 | 
| Chris@441 | 412             NoteModel *model; | 
| Chris@441 | 413             if (haveExtents) { | 
| Chris@859 | 414                 model = new NoteModel | 
| Chris@859 | 415                     (modelRate, modelResolution, minValue, maxValue, false); | 
| Chris@441 | 416             } else { | 
| Chris@859 | 417                 model = new NoteModel | 
| Chris@859 | 418                     (modelRate, modelResolution, false); | 
| gyorgyf@786 | 419             } | 
| Chris@849 | 420             model->setScaleUnits(m_descriptors[n]->unit.c_str()); | 
| Chris@849 | 421             out = model; | 
| gyorgyf@786 | 422 | 
| Chris@859 | 423         } else if (isNoteModel && flexi) { | 
| gyorgyf@786 | 424 | 
| gyorgyf@786 | 425             FlexiNoteModel *model; | 
| gyorgyf@786 | 426             if (haveExtents) { | 
| Chris@859 | 427                 model = new FlexiNoteModel | 
| Chris@859 | 428                     (modelRate, modelResolution, minValue, maxValue, false); | 
| gyorgyf@786 | 429             } else { | 
| Chris@859 | 430                 model = new FlexiNoteModel | 
| Chris@859 | 431                     (modelRate, modelResolution, false); | 
| Chris@441 | 432             } | 
| Chris@849 | 433             model->setScaleUnits(m_descriptors[n]->unit.c_str()); | 
| Chris@849 | 434             out = model; | 
| Chris@441 | 435 | 
| Chris@441 | 436         } else { | 
| Chris@441 | 437 | 
| Chris@441 | 438             RegionModel *model; | 
| Chris@441 | 439             if (haveExtents) { | 
| Chris@441 | 440                 model = new RegionModel | 
| Chris@441 | 441                     (modelRate, modelResolution, minValue, maxValue, false); | 
| Chris@441 | 442             } else { | 
| Chris@441 | 443                 model = new RegionModel | 
| Chris@441 | 444                     (modelRate, modelResolution, false); | 
| Chris@441 | 445             } | 
| Chris@849 | 446             model->setScaleUnits(m_descriptors[n]->unit.c_str()); | 
| Chris@849 | 447             out = model; | 
| Chris@441 | 448         } | 
| Chris@441 | 449 | 
| Chris@558 | 450         QString outputEventTypeURI = description.getOutputEventTypeURI(outputId); | 
| Chris@849 | 451         out->setRDFTypeURI(outputEventTypeURI); | 
| Chris@558 | 452 | 
| Chris@876 | 453     } else if (binCount == 1 || | 
| Chris@849 | 454                (m_descriptors[n]->sampleType == | 
| Chris@441 | 455                 Vamp::Plugin::OutputDescriptor::VariableSampleRate)) { | 
| Chris@441 | 456 | 
| Chris@441 | 457         // Anything that is not a 1D, note, or interval model and that | 
| Chris@441 | 458         // has only one value per result must be a sparse time value | 
| Chris@441 | 459         // model. | 
| Chris@441 | 460 | 
| Chris@441 | 461         // Anything that is not a 1D, note, or interval model and that | 
| Chris@876 | 462         // has a variable sample rate is treated as a set of sparse | 
| Chris@876 | 463         // time value models, one per output bin, because we lack a | 
| Chris@441 | 464         // sparse 3D model. | 
| Chris@320 | 465 | 
| Chris@876 | 466         // Anything that is not a 1D, note, or interval model and that | 
| Chris@876 | 467         // has a fixed sample rate but an unknown number of values per | 
| Chris@876 | 468         // result is also treated as a set of sparse time value models. | 
| Chris@876 | 469 | 
| Chris@876 | 470         // For sets of sparse time value models, we create a single | 
| Chris@876 | 471         // model first as the "standard" output and then create models | 
| Chris@876 | 472         // for bins 1+ in the additional model map (mapping the output | 
| Chris@876 | 473         // descriptor to a list of models indexed by bin-1). But we | 
| Chris@876 | 474         // don't create the additional models yet, as this case has to | 
| Chris@876 | 475         // work even if the number of bins is unknown at this point -- | 
| Chris@877 | 476         // we create an additional model (copying its parameters from | 
| Chris@877 | 477         // the default one) each time a new bin is encountered. | 
| Chris@876 | 478 | 
| Chris@876 | 479         if (!haveBinCount || binCount > 1) { | 
| Chris@876 | 480             m_needAdditionalModels[n] = true; | 
| Chris@876 | 481         } | 
| Chris@876 | 482 | 
| Chris@320 | 483         SparseTimeValueModel *model; | 
| Chris@320 | 484         if (haveExtents) { | 
| Chris@320 | 485             model = new SparseTimeValueModel | 
| Chris@320 | 486                 (modelRate, modelResolution, minValue, maxValue, false); | 
| Chris@320 | 487         } else { | 
| Chris@320 | 488             model = new SparseTimeValueModel | 
| Chris@320 | 489                 (modelRate, modelResolution, false); | 
| Chris@320 | 490         } | 
| Chris@558 | 491 | 
| Chris@558 | 492         Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors(); | 
| Chris@849 | 493         model->setScaleUnits(outputs[m_outputNos[n]].unit.c_str()); | 
| Chris@320 | 494 | 
| Chris@849 | 495         out = model; | 
| Chris@320 | 496 | 
| Chris@558 | 497         QString outputEventTypeURI = description.getOutputEventTypeURI(outputId); | 
| Chris@849 | 498         out->setRDFTypeURI(outputEventTypeURI); | 
| Chris@558 | 499 | 
| Chris@441 | 500     } else { | 
| Chris@320 | 501 | 
| Chris@441 | 502         // Anything that is not a 1D, note, or interval model and that | 
| Chris@441 | 503         // has a fixed sample rate and more than one value per result | 
| Chris@441 | 504         // must be a dense 3D model. | 
| Chris@320 | 505 | 
| Chris@320 | 506         EditableDenseThreeDimensionalModel *model = | 
| Chris@320 | 507             new EditableDenseThreeDimensionalModel | 
| Chris@535 | 508             (modelRate, modelResolution, binCount, | 
| Chris@535 | 509              EditableDenseThreeDimensionalModel::BasicMultirateCompression, | 
| Chris@535 | 510              false); | 
| Chris@320 | 511 | 
| Chris@849 | 512 	if (!m_descriptors[n]->binNames.empty()) { | 
| Chris@320 | 513 	    std::vector<QString> names; | 
| Chris@930 | 514 	    for (int i = 0; i < (int)m_descriptors[n]->binNames.size(); ++i) { | 
| Chris@849 | 515 		names.push_back(m_descriptors[n]->binNames[i].c_str()); | 
| Chris@320 | 516 	    } | 
| Chris@320 | 517 	    model->setBinNames(names); | 
| Chris@320 | 518 	} | 
| Chris@320 | 519 | 
| Chris@849 | 520         out = model; | 
| Chris@558 | 521 | 
| Chris@558 | 522         QString outputSignalTypeURI = description.getOutputSignalTypeURI(outputId); | 
| Chris@849 | 523         out->setRDFTypeURI(outputSignalTypeURI); | 
| Chris@320 | 524     } | 
| Chris@333 | 525 | 
| Chris@849 | 526     if (out) { | 
| Chris@849 | 527         out->setSourceModel(input); | 
| Chris@849 | 528         m_outputs.push_back(out); | 
| Chris@849 | 529     } | 
| Chris@320 | 530 } | 
| Chris@320 | 531 | 
| Chris@1211 | 532 void | 
| Chris@1211 | 533 FeatureExtractionModelTransformer::awaitOutputModels() | 
| Chris@1211 | 534 { | 
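    // Block until initialise() (called from the run thread) has created
    // the output models, or the transform has been abandoned. The 500ms
    // wait timeout lets us re-check m_abandoned periodically.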
| Chris@1211 | 535     m_outputMutex.lock(); | 
| Chris@1368 | 536     while (!m_haveOutputs && !m_abandoned) { | 
| Chris@1368 | 537         m_outputsCondition.wait(&m_outputMutex, 500); | 
| Chris@1211 | 538     } | 
| Chris@1211 | 539     m_outputMutex.unlock(); | 
| Chris@1211 | 540 } | 
| Chris@1211 | 541 | 
| Chris@331 | 542 FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer() | 
| Chris@320 | 543 { | 
| Chris@1237 | 544     // The parent class dtor sets the abandoned flag and waits for the run | 
| Chris@1237 | 545     // thread to exit; the run thread owns the plugin, and should have | 
| Chris@1237 | 546     // destroyed it before exiting (via a call to deinitialise) | 
| Chris@320 | 547 } | 
| Chris@320 | 548 | 
| Chris@876 | 549 FeatureExtractionModelTransformer::Models | 
| Chris@876 | 550 FeatureExtractionModelTransformer::getAdditionalOutputModels() | 
| Chris@876 | 551 { | 
| Chris@876 | 552     Models mm; | 
| Chris@876 | 553     for (AdditionalModelMap::iterator i = m_additionalModels.begin(); | 
| Chris@876 | 554          i != m_additionalModels.end(); ++i) { | 
| Chris@876 | 555         for (std::map<int, SparseTimeValueModel *>::iterator j = | 
| Chris@876 | 556                  i->second.begin(); | 
| Chris@876 | 557              j != i->second.end(); ++j) { | 
| Chris@876 | 558             SparseTimeValueModel *m = j->second; | 
| Chris@876 | 559             if (m) mm.push_back(m); | 
| Chris@876 | 560         } | 
| Chris@876 | 561     } | 
| Chris@876 | 562     return mm; | 
| Chris@876 | 563 } | 
| Chris@876 | 564 | 
| Chris@877 | 565 bool | 
| Chris@877 | 566 FeatureExtractionModelTransformer::willHaveAdditionalOutputModels() | 
| Chris@877 | 567 { | 
| Chris@877 | 568     for (std::map<int, bool>::const_iterator i = | 
| Chris@877 | 569              m_needAdditionalModels.begin(); | 
| Chris@877 | 570          i != m_needAdditionalModels.end(); ++i) { | 
| Chris@877 | 571         if (i->second) return true; | 
| Chris@877 | 572     } | 
| Chris@877 | 573     return false; | 
| Chris@877 | 574 } | 
| Chris@877 | 575 | 
| Chris@876 | 576 SparseTimeValueModel * | 
| Chris@876 | 577 FeatureExtractionModelTransformer::getAdditionalModel(int n, int binNo) | 
| Chris@876 | 578 { | 
| Chris@893 | 579 //    std::cerr << "getAdditionalModel(" << n << ", " << binNo << ")" << std::endl; | 
| Chris@876 | 580 | 
| Chris@876 | 581     if (binNo == 0) { | 
| Chris@876 | 582         std::cerr << "Internal error: binNo == 0 in getAdditionalModel (should be using primary model)" << std::endl; | 
| Chris@876 | 583         return 0; | 
| Chris@876 | 584     } | 
| Chris@876 | 585 | 
| Chris@876 | 586     if (!m_needAdditionalModels[n]) return 0; | 
| Chris@876 | 587     if (!isOutput<SparseTimeValueModel>(n)) return 0; | 
| Chris@876 | 588     if (m_additionalModels[n][binNo]) return m_additionalModels[n][binNo]; | 
| Chris@876 | 589 | 
| Chris@876 | 590     std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): creating" << std::endl; | 
| Chris@876 | 591 | 
| Chris@876 | 592     SparseTimeValueModel *baseModel = getConformingOutput<SparseTimeValueModel>(n); | 
| Chris@876 | 593     if (!baseModel) return 0; | 
| Chris@876 | 594 | 
| Chris@876 | 595     std::cerr << "getAdditionalModel(" << n << ", " << binNo << "): (from " << baseModel << ")" << std::endl; | 
| Chris@876 | 596 | 
| Chris@876 | 597     SparseTimeValueModel *additional = | 
| Chris@876 | 598         new SparseTimeValueModel(baseModel->getSampleRate(), | 
| Chris@876 | 599                                  baseModel->getResolution(), | 
| Chris@876 | 600                                  baseModel->getValueMinimum(), | 
| Chris@876 | 601                                  baseModel->getValueMaximum(), | 
| Chris@876 | 602                                  false); | 
| Chris@876 | 603 | 
| Chris@876 | 604     additional->setScaleUnits(baseModel->getScaleUnits()); | 
| Chris@876 | 605     additional->setRDFTypeURI(baseModel->getRDFTypeURI()); | 
| Chris@876 | 606 | 
| Chris@876 | 607     m_additionalModels[n][binNo] = additional; | 
| Chris@876 | 608     return additional; | 
| Chris@876 | 609 } | 
| Chris@876 | 610 | 
| Chris@320 | 611 DenseTimeValueModel * | 
| Chris@350 | 612 FeatureExtractionModelTransformer::getConformingInput() | 
| Chris@320 | 613 { | 
| Chris@690 | 614 //    SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: input model is " << getInputModel() << endl; | 
| Chris@408 | 615 | 
| Chris@320 | 616     DenseTimeValueModel *dtvm = | 
| Chris@320 | 617 	dynamic_cast<DenseTimeValueModel *>(getInputModel()); | 
| Chris@320 | 618     if (!dtvm) { | 
| Chris@690 | 619 	SVDEBUG << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << endl; | 
| Chris@320 | 620     } | 
| Chris@320 | 621     return dtvm; | 
| Chris@320 | 622 } | 
| Chris@320 | 623 | 
| Chris@320 | 624 void | 
| Chris@331 | 625 FeatureExtractionModelTransformer::run() | 
| Chris@320 | 626 { | 
| Chris@1368 | 627     if (!initialise()) { | 
| Chris@1368 | 628         abandon(); | 
| Chris@1368 | 629         return; | 
| Chris@1368 | 630     } | 
| Chris@1211 | 631 | 
| Chris@350 | 632     DenseTimeValueModel *input = getConformingInput(); | 
| Chris@1368 | 633     if (!input) { | 
| Chris@1368 | 634         abandon(); | 
| Chris@1368 | 635         return; | 
| Chris@1368 | 636     } | 
| Chris@320 | 637 | 
| Chris@1368 | 638     if (m_outputs.empty()) { | 
| Chris@1368 | 639         abandon(); | 
| Chris@1368 | 640         return; | 
| Chris@1368 | 641     } | 
| Chris@320 | 642 | 
| Chris@850 | 643     Transform primaryTransform = m_transforms[0]; | 
| Chris@850 | 644 | 
| Chris@497 | 645     while (!input->isReady() && !m_abandoned) { | 
| Chris@877 | 646         cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << endl; | 
| Chris@497 | 647         usleep(500000); | 
| Chris@320 | 648     } | 
| Chris@497 | 649     if (m_abandoned) return; | 
| Chris@320 | 650 | 
| Chris@1040 | 651     sv_samplerate_t sampleRate = input->getSampleRate(); | 
| Chris@320 | 652 | 
| Chris@930 | 653     int channelCount = input->getChannelCount(); | 
| Chris@930 | 654     if ((int)m_plugin->getMaxChannelCount() < channelCount) { | 
| Chris@320 | 655 	channelCount = 1; | 
| Chris@320 | 656     } | 
| Chris@320 | 657 | 
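    // Allocate one block's worth of input per channel. The extra two
    // floats leave room for frequency-domain input, which is packed
    // below as blockSize/2 + 1 interleaved real/imaginary pairs.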
| Chris@320 | 658     float **buffers = new float*[channelCount]; | 
| Chris@930 | 659     for (int ch = 0; ch < channelCount; ++ch) { | 
| Chris@850 | 660 	buffers[ch] = new float[primaryTransform.getBlockSize() + 2]; | 
| Chris@320 | 661     } | 
| Chris@320 | 662 | 
| Chris@930 | 663     int stepSize = primaryTransform.getStepSize(); | 
| Chris@930 | 664     int blockSize = primaryTransform.getBlockSize(); | 
| Chris@350 | 665 | 
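    // Frequency-domain plugins are fed from per-channel FFT models
    // rather than directly from raw sample blocks.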
| Chris@320 | 666     bool frequencyDomain = (m_plugin->getInputDomain() == | 
| Chris@320 | 667                             Vamp::Plugin::FrequencyDomain); | 
| Chris@320 | 668     std::vector<FFTModel *> fftModels; | 
| Chris@320 | 669 | 
| Chris@320 | 670     if (frequencyDomain) { | 
| Chris@930 | 671         for (int ch = 0; ch < channelCount; ++ch) { | 
| Chris@320 | 672             FFTModel *model = new FFTModel | 
| Chris@350 | 673                                   (getConformingInput(), | 
| Chris@350 | 674                                    channelCount == 1 ? m_input.getChannel() : ch, | 
| Chris@850 | 675                                    primaryTransform.getWindowType(), | 
| Chris@350 | 676                                    blockSize, | 
| Chris@350 | 677                                    stepSize, | 
| Chris@1090 | 678                                    blockSize); | 
| Chris@1080 | 679             if (!model->isOK() || model->getError() != "") { | 
| Chris@1080 | 680                 QString err = model->getError(); | 
| Chris@320 | 681                 delete model; | 
| Chris@850 | 682                 for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@850 | 683                     setCompletion(j, 100); | 
| Chris@850 | 684                 } | 
| Chris@387 | 685                 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either | 
| Chris@1080 | 686                 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer: error is: " + err); | 
| Chris@320 | 687             } | 
| Chris@320 | 688             fftModels.push_back(model); | 
| Chris@1080 | 689             cerr << "created model for channel " << ch << endl; | 
| Chris@320 | 690         } | 
| Chris@320 | 691     } | 
| Chris@320 | 692 | 
| Chris@1040 | 693     sv_frame_t startFrame = m_input.getModel()->getStartFrame(); | 
| Chris@1040 | 694     sv_frame_t endFrame = m_input.getModel()->getEndFrame(); | 
| Chris@320 | 695 | 
| Chris@850 | 696     RealTime contextStartRT = primaryTransform.getStartTime(); | 
| Chris@850 | 697     RealTime contextDurationRT = primaryTransform.getDuration(); | 
| Chris@350 | 698 | 
| Chris@1040 | 699     sv_frame_t contextStart = | 
| Chris@350 | 700         RealTime::realTime2Frame(contextStartRT, sampleRate); | 
| Chris@350 | 701 | 
| Chris@1040 | 702     sv_frame_t contextDuration = | 
| Chris@350 | 703         RealTime::realTime2Frame(contextDurationRT, sampleRate); | 
| Chris@320 | 704 | 
| Chris@320 | 705     if (contextStart == 0 || contextStart < startFrame) { | 
| Chris@320 | 706         contextStart = startFrame; | 
| Chris@320 | 707     } | 
| Chris@320 | 708 | 
| Chris@320 | 709     if (contextDuration == 0) { | 
| Chris@320 | 710         contextDuration = endFrame - contextStart; | 
| Chris@320 | 711     } | 
| Chris@320 | 712     if (contextStart + contextDuration > endFrame) { | 
| Chris@320 | 713         contextDuration = endFrame - contextStart; | 
| Chris@320 | 714     } | 
| Chris@320 | 715 | 
| Chris@1039 | 716     sv_frame_t blockFrame = contextStart; | 
| Chris@320 | 717 | 
| Chris@320 | 718     long prevCompletion = 0; | 
| Chris@320 | 719 | 
| Chris@850 | 720     for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@850 | 721         setCompletion(j, 0); | 
| Chris@850 | 722     } | 
| Chris@320 | 723 | 
| Chris@556 | 724     float *reals = 0; | 
| Chris@556 | 725     float *imaginaries = 0; | 
| Chris@556 | 726     if (frequencyDomain) { | 
| Chris@556 | 727         reals = new float[blockSize/2 + 1]; | 
| Chris@556 | 728         imaginaries = new float[blockSize/2 + 1]; | 
| Chris@556 | 729     } | 
| Chris@556 | 730 | 
| Chris@678 | 731     QString error = ""; | 
| Chris@678 | 732 | 
| Chris@320 | 733     while (!m_abandoned) { | 
| Chris@320 | 734 | 
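        // Decide whether we have reached the end of the context. For
        // frequency-domain input the test is offset by half a block,
        // presumably because the FFT data for a block is centred on
        // blockFrame rather than starting there.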
| Chris@320 | 735         if (frequencyDomain) { | 
| Chris@350 | 736             if (blockFrame - int(blockSize)/2 > | 
| Chris@320 | 737                 contextStart + contextDuration) break; | 
| Chris@320 | 738         } else { | 
| Chris@320 | 739             if (blockFrame >= | 
| Chris@320 | 740                 contextStart + contextDuration) break; | 
| Chris@320 | 741         } | 
| Chris@320 | 742 | 
| Chris@690 | 743 //	SVDEBUG << "FeatureExtractionModelTransformer::run: blockFrame " | 
| Chris@320 | 744 //		  << blockFrame << ", endFrame " << endFrame << ", blockSize " | 
| Chris@687 | 745 //                  << blockSize << endl; | 
| Chris@320 | 746 | 
| Chris@1039 | 747 	int completion = int | 
| Chris@1039 | 748 	    ((((blockFrame - contextStart) / stepSize) * 99) / | 
| Chris@1039 | 749              (contextDuration / stepSize + 1)); | 
| Chris@320 | 750 | 
| Chris@350 | 751 	// channelCount is either m_input.getModel()->channelCount or 1 | 
| Chris@320 | 752 | 
| Chris@363 | 753         if (frequencyDomain) { | 
| Chris@930 | 754             for (int ch = 0; ch < channelCount; ++ch) { | 
| Chris@1039 | 755                 int column = int((blockFrame - startFrame) / stepSize); | 
| Chris@1008 | 756                 if (fftModels[ch]->getValuesAt(column, reals, imaginaries)) { | 
| Chris@1008 | 757                     for (int i = 0; i <= blockSize/2; ++i) { | 
| Chris@1008 | 758                         buffers[ch][i*2] = reals[i]; | 
| Chris@1008 | 759                         buffers[ch][i*2+1] = imaginaries[i]; | 
| Chris@1008 | 760                     } | 
| Chris@1008 | 761                 } else { | 
| Chris@1008 | 762                     for (int i = 0; i <= blockSize/2; ++i) { | 
| Chris@1008 | 763                         buffers[ch][i*2] = 0.f; | 
| Chris@1008 | 764                         buffers[ch][i*2+1] = 0.f; | 
| Chris@1008 | 765                     } | 
| Chris@1008 | 766                 } | 
| Chris@678 | 767                 error = fftModels[ch]->getError(); | 
| Chris@678 | 768                 if (error != "") { | 
| Chris@1264 | 769                     SVDEBUG << "FeatureExtractionModelTransformer::run: Abandoning, error is " << error << endl; | 
| Chris@678 | 770                     m_abandoned = true; | 
| Chris@678 | 771                     m_message = error; | 
| Chris@1080 | 772                     break; | 
| Chris@678 | 773                 } | 
| Chris@363 | 774             } | 
| Chris@363 | 775         } else { | 
| Chris@363 | 776             getFrames(channelCount, blockFrame, blockSize, buffers); | 
| Chris@320 | 777         } | 
| Chris@320 | 778 | 
| Chris@497 | 779         if (m_abandoned) break; | 
| Chris@497 | 780 | 
| Chris@320 | 781 	Vamp::Plugin::FeatureSet features = m_plugin->process | 
| Chris@1040 | 782 	    (buffers, RealTime::frame2RealTime(blockFrame, sampleRate).toVampRealTime()); | 
| Chris@320 | 783 | 
| Chris@497 | 784         if (m_abandoned) break; | 
| Chris@497 | 785 | 
| Chris@850 | 786         for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@930 | 787             for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) { | 
| Chris@850 | 788                 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi]; | 
| Chris@850 | 789                 addFeature(j, blockFrame, feature); | 
| Chris@850 | 790             } | 
| Chris@850 | 791         } | 
| Chris@320 | 792 | 
| Chris@320 | 793 	if (blockFrame == contextStart || completion > prevCompletion) { | 
| Chris@850 | 794             for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@850 | 795                 setCompletion(j, completion); | 
| Chris@850 | 796             } | 
| Chris@320 | 797 	    prevCompletion = completion; | 
| Chris@320 | 798 	} | 
| Chris@320 | 799 | 
| Chris@350 | 800 	blockFrame += stepSize; | 
| Chris@320 | 801     } | 
| Chris@320 | 802 | 
| Chris@497 | 803     if (!m_abandoned) { | 
| Chris@497 | 804         Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures(); | 
| Chris@320 | 805 | 
| Chris@850 | 806         for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@930 | 807             for (int fi = 0; fi < (int)features[m_outputNos[j]].size(); ++fi) { | 
| Chris@850 | 808                 Vamp::Plugin::Feature feature = features[m_outputNos[j]][fi]; | 
| Chris@850 | 809                 addFeature(j, blockFrame, feature); | 
| Chris@850 | 810             } | 
| Chris@497 | 811         } | 
| Chris@497 | 812     } | 
| Chris@320 | 813 | 
| Chris@850 | 814     for (int j = 0; j < (int)m_outputNos.size(); ++j) { | 
| Chris@850 | 815         setCompletion(j, 100); | 
| Chris@850 | 816     } | 
| Chris@320 | 817 | 
| Chris@320 | 818     if (frequencyDomain) { | 
| Chris@930 | 819         for (int ch = 0; ch < channelCount; ++ch) { | 
| Chris@320 | 820             delete fftModels[ch]; | 
| Chris@320 | 821         } | 
| Chris@556 | 822         delete[] reals; | 
| Chris@556 | 823         delete[] imaginaries; | 
| Chris@320 | 824     } | 
| Chris@974 | 825 | 
| Chris@974 | 826     for (int ch = 0; ch < channelCount; ++ch) { | 
| Chris@974 | 827         delete[] buffers[ch]; | 
| Chris@974 | 828     } | 
| Chris@974 | 829     delete[] buffers; | 
| Chris@1237 | 830 | 
| Chris@1237 | 831     deinitialise(); | 
| Chris@320 | 832 } | 
| Chris@320 | 833 | 
| Chris@320 | 834 void | 
| Chris@363 | 835 FeatureExtractionModelTransformer::getFrames(int channelCount, | 
| Chris@1039 | 836                                              sv_frame_t startFrame, | 
| Chris@1039 | 837                                              sv_frame_t size, | 
| Chris@363 | 838                                              float **buffers) | 
| Chris@320 | 839 { | 
| Chris@1039 | 840     sv_frame_t offset = 0; | 
| Chris@320 | 841 | 
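    // If the requested range starts before frame 0, zero-fill the
    // leading part of each channel buffer and shift the real data
    // along by the corresponding offset.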
| Chris@320 | 842     if (startFrame < 0) { | 
| Chris@363 | 843         for (int c = 0; c < channelCount; ++c) { | 
| Chris@1039 | 844             for (sv_frame_t i = 0; i < size && startFrame + i < 0; ++i) { | 
| Chris@363 | 845                 buffers[c][i] = 0.0f; | 
| Chris@363 | 846             } | 
| Chris@320 | 847         } | 
| Chris@320 | 848         offset = -startFrame; | 
| Chris@320 | 849         size -= offset; | 
| Chris@320 | 850         if (size <= 0) return; | 
| Chris@320 | 851         startFrame = 0; | 
| Chris@320 | 852     } | 
| Chris@320 | 853 | 
| Chris@350 | 854     DenseTimeValueModel *input = getConformingInput(); | 
| Chris@350 | 855     if (!input) return; | 
| Chris@363 | 856 | 
| Chris@1039 | 857     sv_frame_t got = 0; | 
| Chris@350 | 858 | 
| Chris@363 | 859     if (channelCount == 1) { | 
| Chris@363 | 860 | 
| Chris@1096 | 861         auto data = input->getData(m_input.getChannel(), startFrame, size); | 
| Chris@1096 | 862         got = data.size(); | 
| Chris@1096 | 863 | 
| Chris@1096 | 864         copy(data.begin(), data.end(), buffers[0] + offset); | 
| Chris@363 | 865 | 
| Chris@363 | 866         if (m_input.getChannel() == -1 && input->getChannelCount() > 1) { | 
| Chris@363 | 867             // use mean instead of sum, as plugin input | 
| Chris@363 | 868             float cc = float(input->getChannelCount()); | 
| Chris@1096 | 869             for (sv_frame_t i = 0; i < got; ++i) { | 
| Chris@363 | 870                 buffers[0][i + offset] /= cc; | 
| Chris@363 | 871             } | 
| Chris@363 | 872         } | 
| Chris@363 | 873 | 
| Chris@363 | 874     } else { | 
| Chris@363 | 875 | 
| Chris@1096 | 876         auto data = input->getMultiChannelData(0, channelCount-1, startFrame, size); | 
| Chris@1096 | 877         if (!data.empty()) { | 
| Chris@1096 | 878             got = data[0].size(); | 
| Chris@1096 | 879             for (int c = 0; in_range_for(data, c); ++c) { | 
| Chris@1096 | 880                 copy(data[c].begin(), data[c].end(), buffers[c] + offset); | 
| Chris@363 | 881             } | 
| Chris@363 | 882         } | 
| Chris@363 | 883     } | 
| Chris@320 | 884 | 
| Chris@320 | 885     while (got < size) { | 
| Chris@363 | 886         for (int c = 0; c < channelCount; ++c) { | 
| Chris@363 | 887             buffers[c][got + offset] = 0.0; | 
| Chris@363 | 888         } | 
| Chris@320 | 889         ++got; | 
| Chris@320 | 890     } | 
| Chris@320 | 891 } | 
| Chris@320 | 892 | 
| Chris@320 | 893 void | 
| Chris@850 | 894 FeatureExtractionModelTransformer::addFeature(int n, | 
| Chris@1039 | 895                                               sv_frame_t blockFrame, | 
| Chris@850 | 896                                               const Vamp::Plugin::Feature &feature) | 
| Chris@320 | 897 { | 
| Chris@1040 | 898     sv_samplerate_t inputRate = m_input.getModel()->getSampleRate(); | 
| Chris@320 | 899 | 
| Chris@843 | 900 //    cerr << "FeatureExtractionModelTransformer::addFeature: blockFrame = " | 
| Chris@712 | 901 //              << blockFrame << ", hasTimestamp = " << feature.hasTimestamp | 
| Chris@712 | 902 //              << ", timestamp = " << feature.timestamp << ", hasDuration = " | 
| Chris@712 | 903 //              << feature.hasDuration << ", duration = " << feature.duration | 
| Chris@843 | 904 //              << endl; | 
| Chris@320 | 905 | 
| Chris@1039 | 906     sv_frame_t frame = blockFrame; | 
| Chris@320 | 907 | 
| Chris@849 | 908     if (m_descriptors[n]->sampleType == | 
| Chris@320 | 909 	Vamp::Plugin::OutputDescriptor::VariableSampleRate) { | 
| Chris@320 | 910 | 
| Chris@320 | 911 	if (!feature.hasTimestamp) { | 
| Chris@1264 | 912 	    SVDEBUG | 
| Chris@331 | 913 		<< "WARNING: FeatureExtractionModelTransformer::addFeature: " | 
| Chris@320 | 914 		<< "Feature has variable sample rate but no timestamp!" | 
| Chris@843 | 915 		<< endl; | 
| Chris@320 | 916 	    return; | 
| Chris@320 | 917 	} else { | 
| Chris@1040 | 918 	    frame = RealTime::realTime2Frame(feature.timestamp, inputRate); | 
| Chris@320 | 919 	} | 
| Chris@320 | 920 | 
| Chris@1071 | 921 //        cerr << "variable sample rate: timestamp = " << feature.timestamp | 
| Chris@1071 | 922 //             << " at input rate " << inputRate << " -> " << frame << endl; | 
| Chris@1071 | 923 | 
| Chris@849 | 924     } else if (m_descriptors[n]->sampleType == | 
| Chris@320 | 925 	       Vamp::Plugin::OutputDescriptor::FixedSampleRate) { | 
| Chris@320 | 926 | 
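        // For fixed-rate outputs, frames are derived from a running
        // per-output feature counter: it advances by one for untimed
        // features and is reset from the timestamp when one is given.
        // A missing or non-positive declared rate falls back to the
        // input model's rate.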
| Chris@1071 | 927         sv_samplerate_t rate = m_descriptors[n]->sampleRate; | 
| Chris@1071 | 928         if (rate <= 0.0) { | 
| Chris@1071 | 929             rate = inputRate; | 
| Chris@1071 | 930         } | 
| Chris@1071 | 931 | 
| Chris@779 | 932         if (!feature.hasTimestamp) { | 
| Chris@849 | 933             ++m_fixedRateFeatureNos[n]; | 
| Chris@779 | 934         } else { | 
| Chris@779 | 935             RealTime ts(feature.timestamp.sec, feature.timestamp.nsec); | 
| Chris@1071 | 936             m_fixedRateFeatureNos[n] = (int)lrint(ts.toDouble() * rate); | 
| Chris@779 | 937         } | 
| Chris@862 | 938 | 
| Chris@1071 | 939 //        cerr << "m_fixedRateFeatureNo = " << m_fixedRateFeatureNos[n] | 
| Chris@1071 | 940 //             << ", m_descriptor->sampleRate = " << m_descriptors[n]->sampleRate | 
| Chris@862 | 941 //             << ", inputRate = " << inputRate | 
| Chris@862 | 942 //             << " giving frame = "; | 
| Chris@1071 | 943         frame = lrint((double(m_fixedRateFeatureNos[n]) / rate) * inputRate); | 
| Chris@1071 | 944 //        cerr << frame << endl; | 
| Chris@320 | 945     } | 
| Chris@862 | 946 | 
| Chris@862 | 947     if (frame < 0) { | 
| Chris@1264 | 948         SVDEBUG | 
| Chris@862 | 949             << "WARNING: FeatureExtractionModelTransformer::addFeature: " | 
| Chris@862 | 950             << "Negative frame counts are not supported (frame = " << frame | 
| Chris@862 | 951             << " from timestamp " << feature.timestamp | 
| Chris@862 | 952             << "), dropping feature" | 
| Chris@862 | 953             << endl; | 
| Chris@862 | 954         return; | 
| Chris@862 | 955     } | 
| Chris@862 | 956 | 
| Chris@441 | 957     // Rather than repeat the complicated tests from the constructor | 
| Chris@441 | 958     // to determine what sort of model we must be adding the features | 
| Chris@441 | 959     // to, we instead test what sort of model the constructor decided | 
| Chris@441 | 960     // to create. | 
| Chris@320 | 961 | 
| Chris@849 | 962     if (isOutput<SparseOneDimensionalModel>(n)) { | 
| Chris@441 | 963 | 
| Chris@441 | 964         SparseOneDimensionalModel *model = | 
| Chris@849 | 965             getConformingOutput<SparseOneDimensionalModel>(n); | 
| Chris@320 | 966 	if (!model) return; | 
| Chris@350 | 967 | 
| Chris@441 | 968         model->addPoint(SparseOneDimensionalModel::Point | 
| Chris@441 | 969                        (frame, feature.label.c_str())); | 
| Chris@320 | 970 | 
| Chris@849 | 971     } else if (isOutput<SparseTimeValueModel>(n)) { | 
| Chris@320 | 972 | 
| Chris@350 | 973 	SparseTimeValueModel *model = | 
| Chris@849 | 974             getConformingOutput<SparseTimeValueModel>(n); | 
| Chris@320 | 975 	if (!model) return; | 
| Chris@350 | 976 | 
| Chris@930 | 977         for (int i = 0; i < (int)feature.values.size(); ++i) { | 
| Chris@454 | 978 | 
| Chris@454 | 979             float value = feature.values[i]; | 
| Chris@454 | 980 | 
| Chris@454 | 981             QString label = feature.label.c_str(); | 
| Chris@454 | 982             if (feature.values.size() > 1) { | 
| Chris@454 | 983                 label = QString("[%1] %2").arg(i+1).arg(label); | 
| Chris@454 | 984             } | 
| Chris@454 | 985 | 
| Chris@876 | 986             SparseTimeValueModel *targetModel = model; | 
| Chris@876 | 987 | 
| Chris@876 | 988             if (m_needAdditionalModels[n] && i > 0) { | 
| Chris@876 | 989                 targetModel = getAdditionalModel(n, i); | 
| Chris@876 | 990                 if (!targetModel) targetModel = model; | 
| Chris@893 | 991 //                std::cerr << "adding point to model " << targetModel | 
| Chris@893 | 992 //                          << " for output " << n << " bin " << i << std::endl; | 
| Chris@876 | 993             } | 
| Chris@876 | 994 | 
| Chris@876 | 995             targetModel->addPoint | 
| Chris@876 | 996                 (SparseTimeValueModel::Point(frame, value, label)); | 
| Chris@454 | 997         } | 
| Chris@320 | 998 | 
| Chris@849 | 999     } else if (isOutput<FlexiNoteModel>(n) || isOutput<NoteModel>(n) || isOutput<RegionModel>(n)) { //GF: Added Note Model | 
| Chris@320 | 1000 | 
| Chris@441 | 1001         int index = 0; | 
| Chris@441 | 1002 | 
| Chris@441 | 1003         float value = 0.0; | 
| Chris@930 | 1004         if ((int)feature.values.size() > index) { | 
| Chris@441 | 1005             value = feature.values[index++]; | 
| Chris@441 | 1006         } | 
| Chris@320 | 1007 | 
| Chris@1039 | 1008         sv_frame_t duration = 1; | 
| Chris@441 | 1009         if (feature.hasDuration) { | 
| Chris@1040 | 1010             duration = RealTime::realTime2Frame(feature.duration, inputRate); | 
| Chris@441 | 1011         } else { | 
| Chris@1039 | 1012             if (in_range_for(feature.values, index)) { | 
| Chris@1039 | 1013                 duration = lrintf(feature.values[index++]); | 
| Chris@441 | 1014             } | 
| Chris@441 | 1015         } | 
| gyorgyf@786 | 1016 | 
| Chris@891 | 1017         if (isOutput<FlexiNoteModel>(n)) { // GF: added for flexi note model | 
| gyorgyf@786 | 1018 | 
| gyorgyf@786 | 1019             float velocity = 100; | 
| Chris@930 | 1020             if ((int)feature.values.size() > index) { | 
| gyorgyf@786 | 1021                 velocity = feature.values[index++]; | 
| gyorgyf@786 | 1022             } | 
| gyorgyf@786 | 1023             if (velocity < 0) velocity = 127; | 
| gyorgyf@786 | 1024             if (velocity > 127) velocity = 127; | 
| gyorgyf@786 | 1025 | 
| Chris@849 | 1026             FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n); | 
| gyorgyf@786 | 1027             if (!model) return; | 
| Chris@1039 | 1028             model->addPoint(FlexiNoteModel::Point(frame, | 
| Chris@1039 | 1029                                                   value, // value is pitch | 
| Chris@1039 | 1030                                                   duration, | 
| Chris@1039 | 1031                                                   velocity / 127.f, | 
| Chris@1039 | 1032                                                   feature.label.c_str())); | 
| gyorgyf@786 | 1033             // GF: end -- added for flexi note model | 
| Chris@849 | 1034         } else if (isOutput<NoteModel>(n)) { | 
| Chris@320 | 1035 | 
| Chris@441 | 1036             float velocity = 100; | 
| Chris@930 | 1037             if ((int)feature.values.size() > index) { | 
| Chris@441 | 1038                 velocity = feature.values[index++]; | 
| Chris@441 | 1039             } | 
| Chris@441 | 1040             if (velocity < 0) velocity = 127; | 
| Chris@441 | 1041             if (velocity > 127) velocity = 127; | 
| Chris@320 | 1042 | 
| Chris@849 | 1043             NoteModel *model = getConformingOutput<NoteModel>(n); | 
| Chris@441 | 1044             if (!model) return; | 
| Chris@441 | 1045             model->addPoint(NoteModel::Point(frame, value, // value is pitch | 
| Chris@1039 | 1046                                              duration, | 
| Chris@441 | 1047                                              velocity / 127.f, | 
| Chris@441 | 1048                                              feature.label.c_str())); | 
| Chris@441 | 1049         } else { | 
| gyorgyf@786 | 1050 | 
| Chris@849 | 1051             RegionModel *model = getConformingOutput<RegionModel>(n); | 
| Chris@454 | 1052             if (!model) return; | 
| Chris@454 | 1053 | 
| Chris@474 | 1054             if (feature.hasDuration && !feature.values.empty()) { | 
| Chris@454 | 1055 | 
| Chris@930 | 1056                 for (int i = 0; i < (int)feature.values.size(); ++i) { | 
| Chris@454 | 1057 | 
| Chris@454 | 1058                     float value = feature.values[i]; | 
| Chris@454 | 1059 | 
| Chris@454 | 1060                     QString label = feature.label.c_str(); | 
| Chris@454 | 1061                     if (feature.values.size() > 1) { | 
| Chris@454 | 1062                         label = QString("[%1] %2").arg(i+1).arg(label); | 
| Chris@454 | 1063                     } | 
| Chris@454 | 1064 | 
| Chris@1039 | 1065                     model->addPoint(RegionModel::Point(frame, | 
| Chris@1039 | 1066                                                        value, | 
| Chris@1039 | 1067                                                        duration, | 
| Chris@454 | 1068                                                        label)); | 
| Chris@454 | 1069                 } | 
| Chris@454 | 1070             } else { | 
| Chris@454 | 1071 | 
| Chris@1039 | 1072                 model->addPoint(RegionModel::Point(frame, | 
| Chris@1039 | 1073                                                    value, | 
| Chris@1039 | 1074                                                    duration, | 
| Chris@441 | 1075                                                    feature.label.c_str())); | 
| Chris@454 | 1076             } | 
| Chris@441 | 1077         } | 
| Chris@320 | 1078 | 
| Chris@849 | 1079     } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) { | 
| Chris@320 | 1080 | 
| Chris@1154 | 1081         DenseThreeDimensionalModel::Column values = feature.values; | 
| Chris@320 | 1082 | 
| Chris@320 | 1083         EditableDenseThreeDimensionalModel *model = | 
| Chris@849 | 1084             getConformingOutput<EditableDenseThreeDimensionalModel>(n); | 
| Chris@320 | 1085         if (!model) return; | 
| Chris@320 | 1086 | 
| Chris@889 | 1087 //        cerr << "(note: model resolution = " << model->getResolution() << ")" | 
| Chris@889 | 1088 //             << endl; | 
| Chris@889 | 1089 | 
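        // A feature with no timestamp on a fixed-rate output is written
        // to the column given by the fixed-rate feature number tracked
        // for this output; otherwise the column is derived from the
        // feature's frame and the model's resolution.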
| Chris@891 | 1090         if (!feature.hasTimestamp && m_fixedRateFeatureNos[n] >= 0) { | 
| Chris@891 | 1091             model->setColumn(m_fixedRateFeatureNos[n], values); | 
| Chris@889 | 1092         } else { | 
| Chris@1039 | 1093             model->setColumn(int(frame / model->getResolution()), values); | 
| Chris@889 | 1094         } | 
| Chris@441 | 1095 | 
| Chris@441 | 1096     } else { | 
| Chris@690 | 1097         SVDEBUG << "FeatureExtractionModelTransformer::addFeature: Unknown output model type!" << endl; | 
| Chris@320 | 1098     } | 
| Chris@320 | 1099 } | 
| Chris@320 | 1100 | 
| Chris@320 | 1101 void | 
| Chris@850 | 1102 FeatureExtractionModelTransformer::setCompletion(int n, int completion) | 
| Chris@320 | 1103 { | 
| Chris@690 | 1104 //    SVDEBUG << "FeatureExtractionModelTransformer::setCompletion(" | 
| Chris@687 | 1105 //              << completion << ")" << endl; | 
| Chris@320 | 1106 | 
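    // As in addFeature, dispatch on the kind of model the constructor
    // created for this output: each branch propagates any abandonment
    // request from the model before recording the completion percentage.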
| Chris@849 | 1107     if (isOutput<SparseOneDimensionalModel>(n)) { | 
| Chris@320 | 1108 | 
| Chris@350 | 1109         SparseOneDimensionalModel *model = | 
| Chris@849 | 1110             getConformingOutput<SparseOneDimensionalModel>(n); | 
| Chris@320 | 1111         if (!model) return; | 
| Chris@923 | 1112         if (model->isAbandoning()) abandon(); | 
| Chris@441 | 1113         model->setCompletion(completion, true); | 
| Chris@320 | 1114 | 
| Chris@849 | 1115     } else if (isOutput<SparseTimeValueModel>(n)) { | 
| Chris@320 | 1116 | 
| Chris@350 | 1117         SparseTimeValueModel *model = | 
| Chris@849 | 1118             getConformingOutput<SparseTimeValueModel>(n); | 
| Chris@320 | 1119         if (!model) return; | 
| Chris@923 | 1120         if (model->isAbandoning()) abandon(); | 
| Chris@441 | 1121         model->setCompletion(completion, true); | 
| Chris@320 | 1122 | 
| Chris@849 | 1123     } else if (isOutput<NoteModel>(n)) { | 
| Chris@320 | 1124 | 
| Chris@849 | 1125         NoteModel *model = getConformingOutput<NoteModel>(n); | 
| Chris@320 | 1126         if (!model) return; | 
| Chris@923 | 1127         if (model->isAbandoning()) abandon(); | 
| Chris@441 | 1128         model->setCompletion(completion, true); | 
| gyorgyf@786 | 1129 | 
| Chris@923 | 1130     } else if (isOutput<FlexiNoteModel>(n)) { | 
| gyorgyf@786 | 1131 | 
| Chris@849 | 1132         FlexiNoteModel *model = getConformingOutput<FlexiNoteModel>(n); | 
| gyorgyf@786 | 1133         if (!model) return; | 
| Chris@923 | 1134         if (model->isAbandoning()) abandon(); | 
| gyorgyf@786 | 1135         model->setCompletion(completion, true); | 
| Chris@320 | 1136 | 
| Chris@849 | 1137     } else if (isOutput<RegionModel>(n)) { | 
| Chris@441 | 1138 | 
| Chris@849 | 1139         RegionModel *model = getConformingOutput<RegionModel>(n); | 
| Chris@441 | 1140         if (!model) return; | 
| Chris@923 | 1141         if (model->isAbandoning()) abandon(); | 
| Chris@441 | 1142         model->setCompletion(completion, true); | 
| Chris@441 | 1143 | 
| Chris@849 | 1144     } else if (isOutput<EditableDenseThreeDimensionalModel>(n)) { | 
| Chris@320 | 1145 | 
| Chris@320 | 1146         EditableDenseThreeDimensionalModel *model = | 
| Chris@849 | 1147             getConformingOutput<EditableDenseThreeDimensionalModel>(n); | 
| Chris@320 | 1148         if (!model) return; | 
| Chris@923 | 1149         if (model->isAbandoning()) abandon(); | 
| Chris@350 | 1150         model->setCompletion(completion, true); //!!!m_context.updates); | 
| Chris@320 | 1151     } | 
| Chris@320 | 1152 } | 
| Chris@320 | 1153 |