Chris@320: /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
Chris@320: 
Chris@320: /*
Chris@320:     Sonic Visualiser
Chris@320:     An audio file viewer and annotation editor.
Chris@320:     Centre for Digital Music, Queen Mary, University of London.
Chris@320:     This file copyright 2006 Chris Cannam and QMUL.
Chris@320:     
Chris@320:     This program is free software; you can redistribute it and/or
Chris@320:     modify it under the terms of the GNU General Public License as
Chris@320:     published by the Free Software Foundation; either version 2 of the
Chris@320:     License, or (at your option) any later version.  See the file
Chris@320:     COPYING included with this distribution for more information.
Chris@320: */
Chris@320: 
Chris@331: #include "FeatureExtractionModelTransformer.h"
Chris@320: 
Chris@320: #include "plugin/FeatureExtractionPluginFactory.h"
Chris@320: #include "plugin/PluginXml.h"
Chris@320: #include "vamp-sdk/Plugin.h"
Chris@320: 
Chris@320: #include "data/model/Model.h"
Chris@320: #include "base/Window.h"
Chris@387: #include "base/Exceptions.h"
Chris@320: #include "data/model/SparseOneDimensionalModel.h"
Chris@320: #include "data/model/SparseTimeValueModel.h"
Chris@320: #include "data/model/EditableDenseThreeDimensionalModel.h"
Chris@320: #include "data/model/DenseTimeValueModel.h"
Chris@320: #include "data/model/NoteModel.h"
Chris@320: #include "data/model/FFTModel.h"
Chris@320: #include "data/model/WaveFileModel.h"
Chris@320: 
Chris@350: #include "TransformFactory.h"
Chris@350: 
Chris@320: #include <iostream>
Chris@320: 
// Prepare a feature-extraction transformer: look up the plugin
// factory for the transform's plugin id, instantiate and configure
// the plugin, select the requested output, and create an output
// model whose type matches the shape of that output's features.
// On any failure a user-facing explanation is stored in m_message
// and construction returns early, leaving m_output null.
FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Input in,
                                                                     const Transform &transform) :
    ModelTransformer(in, transform),
    m_plugin(0),
    m_descriptor(0),
    m_outputFeatureNo(0)
{
//    std::cerr << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId.toStdString() << ", outputName " << m_transform.getOutput().toStdString() << std::endl;

    QString pluginId = transform.getPluginIdentifier();

    // Resolve the factory responsible for this plugin id; failure
    // here usually means an unknown or non-feature-extraction plugin
    FeatureExtractionPluginFactory *factory =
	FeatureExtractionPluginFactory::instanceFor(pluginId);

    if (!factory) {
        m_message = tr("No factory available for feature extraction plugin id \"%1\" (unknown plugin type, or internal error?)").arg(pluginId);
	return;
    }

    // The plugin needs dense time-value (audio-like) input
    DenseTimeValueModel *input = getConformingInput();
    if (!input) {
        m_message = tr("Input model for feature extraction plugin \"%1\" is of wrong type (internal error?)").arg(pluginId);
        return;
    }

    m_plugin = factory->instantiatePlugin(pluginId, input->getSampleRate());
    if (!m_plugin) {
        m_message = tr("Failed to instantiate plugin \"%1\"").arg(pluginId);
	return;
    }

    // Fill in any step/block/window defaults the transform left
    // unspecified, then push the transform's parameter values into
    // the plugin instance
    TransformFactory::getInstance()->makeContextConsistentWithPlugin
        (m_transform, m_plugin);

    TransformFactory::getInstance()->setPluginParameters
        (m_transform, m_plugin);

    // Clamp channel count to what the plugin accepts: mix down to
    // one channel if the input has too many; bail out if the plugin
    // demands more channels than the input can supply
    size_t channelCount = input->getChannelCount();
    if (m_plugin->getMaxChannelCount() < channelCount) {
	channelCount = 1;
    }
    if (m_plugin->getMinChannelCount() > channelCount) {
        m_message = tr("Cannot provide enough channels to feature extraction plugin \"%1\" (plugin min is %2, max %3; input model has %4)")
            .arg(pluginId)
            .arg(m_plugin->getMinChannelCount())
            .arg(m_plugin->getMaxChannelCount())
            .arg(input->getChannelCount());
	return;
    }

    std::cerr << "Initialising feature extraction plugin with channels = "
              << channelCount << ", step = " << m_transform.getStepSize()
              << ", block = " << m_transform.getBlockSize() << std::endl;

    // If initialise rejects the requested step/block sizes, retry
    // once with the plugin's own preferred sizes (obtained by zeroing
    // the transform's sizes and re-running the consistency pass); if
    // the sizes were already the preferred ones, or the retry also
    // fails, give up
    if (!m_plugin->initialise(channelCount,
                              m_transform.getStepSize(),
                              m_transform.getBlockSize())) {

        size_t pstep = m_transform.getStepSize();
        size_t pblock = m_transform.getBlockSize();

        m_transform.setStepSize(0);
        m_transform.setBlockSize(0);
        TransformFactory::getInstance()->makeContextConsistentWithPlugin
            (m_transform, m_plugin);

        if (m_transform.getStepSize() != pstep ||
            m_transform.getBlockSize() != pblock) {
            
            if (!m_plugin->initialise(channelCount,
                                      m_transform.getStepSize(),
                                      m_transform.getBlockSize())) {

                m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
                return;

            } else {

                m_message = tr("Feature extraction plugin \"%1\" rejected the given step and block sizes (%2 and %3); using plugin defaults (%4 and %5) instead")
                    .arg(pluginId)
                    .arg(pstep)
                    .arg(pblock)
                    .arg(m_transform.getStepSize())
                    .arg(m_transform.getBlockSize());
            }

        } else {

            m_message = tr("Failed to initialise feature extraction plugin \"%1\"").arg(pluginId);
            return;
        }
    }

    // Warn (non-fatally) if the transform was saved against a
    // different plugin version than the one actually loaded
    if (m_transform.getPluginVersion() != "") {
        QString pv = QString("%1").arg(m_plugin->getPluginVersion());
        if (pv != m_transform.getPluginVersion()) {
            QString vm = tr("Transform was configured for version %1 of plugin \"%2\", but the plugin being used is version %3")
                .arg(m_transform.getPluginVersion())
                .arg(pluginId)
                .arg(pv);
            if (m_message != "") {
                m_message = QString("%1; %2").arg(vm).arg(m_message);
            } else {
                m_message = vm;
            }
        }
    }

    Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();

    if (outputs.empty()) {
        m_message = tr("Plugin \"%1\" has no outputs").arg(pluginId);
	return;
    }
    
    // Find the requested output by identifier; an empty identifier
    // in the transform selects the plugin's first output
    for (size_t i = 0; i < outputs.size(); ++i) {
//        std::cerr << "comparing output " << i << " name \"" << outputs[i].identifier << "\" with expected \"" << m_transform.getOutput().toStdString() << "\"" << std::endl;
	if (m_transform.getOutput() == "" ||
            outputs[i].identifier == m_transform.getOutput().toStdString()) {
	    m_outputFeatureNo = i;
	    m_descriptor = new Vamp::Plugin::OutputDescriptor
		(outputs[i]);
	    break;
	}
    }

    if (!m_descriptor) {
        m_message = tr("Plugin \"%1\" has no output named \"%2\"")
            .arg(pluginId)
            .arg(m_transform.getOutput());
	return;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output sample type "
//	      << m_descriptor->sampleType << std::endl;

    int binCount = 1;
    float minValue = 0.0, maxValue = 0.0;
    bool haveExtents = false;
    
    if (m_descriptor->hasFixedBinCount) {
	binCount = m_descriptor->binCount;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output bin count "
//	      << binCount << std::endl;

    if (binCount > 0 && m_descriptor->hasKnownExtents) {
	minValue = m_descriptor->minValue;
	maxValue = m_descriptor->maxValue;
        haveExtents = true;
    }

    // Derive the output model's rate and resolution from the
    // output's declared sample type (see Vamp OutputDescriptor docs)
    size_t modelRate = input->getSampleRate();
    size_t modelResolution = 1;
    
    switch (m_descriptor->sampleType) {

    case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
	if (m_descriptor->sampleRate != 0.0) {
	    modelResolution = size_t(modelRate / m_descriptor->sampleRate + 0.001);
	}
	break;

    case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
	modelResolution = m_transform.getStepSize();
	break;

    case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
	modelRate = size_t(m_descriptor->sampleRate + 0.001);
	break;
    }

    // Choose a model type by output shape:
    //   0 bins            -> instants only (one-dimensional)
    //   1 bin             -> time/value pairs
    //   N bins, variable  -> note model (no sparse 3D model exists)
    //   N bins, otherwise -> dense three-dimensional grid
    if (binCount == 0) {

	m_output = new SparseOneDimensionalModel(modelRate, modelResolution,
						 false);

    } else if (binCount == 1) {

        SparseTimeValueModel *model;
        if (haveExtents) {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, false);
        }
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else if (m_descriptor->sampleType ==
	       Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        // We don't have a sparse 3D model, so interpret this as a
        // note model.  There's nothing to define which values to use
        // as which parameters of the note -- for the moment let's
        // treat the first as pitch, second as duration in frames,
        // third (if present) as velocity. (Our note model doesn't
        // yet store velocity.)
        //!!! todo: ask the user!
	
        NoteModel *model;
        if (haveExtents) {
            model = new NoteModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new NoteModel
                (modelRate, modelResolution, false);
        }            
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else {

        EditableDenseThreeDimensionalModel *model =
            new EditableDenseThreeDimensionalModel
            (modelRate, modelResolution, binCount, false);

	if (!m_descriptor->binNames.empty()) {
	    std::vector<QString> names;
	    for (size_t i = 0; i < m_descriptor->binNames.size(); ++i) {
		names.push_back(m_descriptor->binNames[i].c_str());
	    }
	    model->setBinNames(names);
	}
        
        m_output = model;
    }

    if (m_output) m_output->setSourceModel(input);
}
Chris@320: 
Chris@331: FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()
Chris@320: {
Chris@436: //    std::cerr << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << std::endl;
Chris@320:     delete m_plugin;
Chris@320:     delete m_descriptor;
Chris@320: }
Chris@320: 
Chris@320: DenseTimeValueModel *
Chris@350: FeatureExtractionModelTransformer::getConformingInput()
Chris@320: {
Chris@408: //    std::cerr << "FeatureExtractionModelTransformer::getConformingInput: input model is " << getInputModel() << std::endl;
Chris@408: 
Chris@320:     DenseTimeValueModel *dtvm =
Chris@320: 	dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320:     if (!dtvm) {
Chris@350: 	std::cerr << "FeatureExtractionModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
Chris@320:     }
Chris@320:     return dtvm;
Chris@320: }
Chris@320: 
Chris@320: void
Chris@331: FeatureExtractionModelTransformer::run()
Chris@320: {
Chris@350:     DenseTimeValueModel *input = getConformingInput();
Chris@320:     if (!input) return;
Chris@320: 
Chris@320:     if (!m_output) return;
Chris@320: 
Chris@320:     while (!input->isReady()) {
Chris@331:         std::cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..." << std::endl;
Chris@320:         sleep(1);
Chris@320:     }
Chris@320: 
Chris@350:     size_t sampleRate = input->getSampleRate();
Chris@320: 
Chris@320:     size_t channelCount = input->getChannelCount();
Chris@320:     if (m_plugin->getMaxChannelCount() < channelCount) {
Chris@320: 	channelCount = 1;
Chris@320:     }
Chris@320: 
Chris@320:     float **buffers = new float*[channelCount];
Chris@320:     for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@350: 	buffers[ch] = new float[m_transform.getBlockSize() + 2];
Chris@320:     }
Chris@320: 
Chris@350:     size_t stepSize = m_transform.getStepSize();
Chris@350:     size_t blockSize = m_transform.getBlockSize();
Chris@350: 
Chris@320:     bool frequencyDomain = (m_plugin->getInputDomain() ==
Chris@320:                             Vamp::Plugin::FrequencyDomain);
Chris@320:     std::vector<FFTModel *> fftModels;
Chris@320: 
Chris@320:     if (frequencyDomain) {
Chris@320:         for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@320:             FFTModel *model = new FFTModel
Chris@350:                                   (getConformingInput(),
Chris@350:                                    channelCount == 1 ? m_input.getChannel() : ch,
Chris@350:                                    m_transform.getWindowType(),
Chris@350:                                    blockSize,
Chris@350:                                    stepSize,
Chris@350:                                    blockSize,
Chris@334:                                    false,
Chris@334:                                    StorageAdviser::PrecisionCritical);
Chris@320:             if (!model->isOK()) {
Chris@320:                 delete model;
Chris@320:                 setCompletion(100);
Chris@387:                 //!!! need a better way to handle this -- previously we were using a QMessageBox but that isn't an appropriate thing to do here either
Chris@387:                 throw AllocationFailed("Failed to create the FFT model for this feature extraction model transformer");
Chris@320:             }
Chris@320:             model->resume();
Chris@320:             fftModels.push_back(model);
Chris@320:         }
Chris@320:     }
Chris@320: 
Chris@350:     long startFrame = m_input.getModel()->getStartFrame();
Chris@350:     long   endFrame = m_input.getModel()->getEndFrame();
Chris@320: 
Chris@350:     RealTime contextStartRT = m_transform.getStartTime();
Chris@350:     RealTime contextDurationRT = m_transform.getDuration();
Chris@350: 
Chris@350:     long contextStart =
Chris@350:         RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350: 
Chris@350:     long contextDuration =
Chris@350:         RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320: 
Chris@320:     if (contextStart == 0 || contextStart < startFrame) {
Chris@320:         contextStart = startFrame;
Chris@320:     }
Chris@320: 
Chris@320:     if (contextDuration == 0) {
Chris@320:         contextDuration = endFrame - contextStart;
Chris@320:     }
Chris@320:     if (contextStart + contextDuration > endFrame) {
Chris@320:         contextDuration = endFrame - contextStart;
Chris@320:     }
Chris@320: 
Chris@320:     long blockFrame = contextStart;
Chris@320: 
Chris@320:     long prevCompletion = 0;
Chris@320: 
Chris@320:     setCompletion(0);
Chris@320: 
Chris@320:     while (!m_abandoned) {
Chris@320: 
Chris@320:         if (frequencyDomain) {
Chris@350:             if (blockFrame - int(blockSize)/2 >
Chris@320:                 contextStart + contextDuration) break;
Chris@320:         } else {
Chris@320:             if (blockFrame >= 
Chris@320:                 contextStart + contextDuration) break;
Chris@320:         }
Chris@320: 
Chris@331: //	std::cerr << "FeatureExtractionModelTransformer::run: blockFrame "
Chris@320: //		  << blockFrame << ", endFrame " << endFrame << ", blockSize "
Chris@350: //                  << blockSize << std::endl;
Chris@320: 
Chris@320: 	long completion =
Chris@350: 	    (((blockFrame - contextStart) / stepSize) * 99) /
Chris@350: 	    (contextDuration / stepSize);
Chris@320: 
Chris@350: 	// channelCount is either m_input.getModel()->channelCount or 1
Chris@320: 
Chris@363:         if (frequencyDomain) {
Chris@363:             for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@350:                 int column = (blockFrame - startFrame) / stepSize;
Chris@350:                 for (size_t i = 0; i <= blockSize/2; ++i) {
Chris@320:                     fftModels[ch]->getValuesAt
Chris@320:                         (column, i, buffers[ch][i*2], buffers[ch][i*2+1]);
Chris@320:                 }
Chris@363:             }
Chris@363:         } else {
Chris@363:             getFrames(channelCount, blockFrame, blockSize, buffers);
Chris@320:         }
Chris@320: 
Chris@320: 	Vamp::Plugin::FeatureSet features = m_plugin->process
Chris@320: 	    (buffers, Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));
Chris@320: 
Chris@320: 	for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) {
Chris@320: 	    Vamp::Plugin::Feature feature =
Chris@320: 		features[m_outputFeatureNo][fi];
Chris@320: 	    addFeature(blockFrame, feature);
Chris@320: 	}
Chris@320: 
Chris@320: 	if (blockFrame == contextStart || completion > prevCompletion) {
Chris@320: 	    setCompletion(completion);
Chris@320: 	    prevCompletion = completion;
Chris@320: 	}
Chris@320: 
Chris@350: 	blockFrame += stepSize;
Chris@320:     }
Chris@320: 
Chris@320:     if (m_abandoned) return;
Chris@320: 
Chris@320:     Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures();
Chris@320: 
Chris@320:     for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) {
Chris@320: 	Vamp::Plugin::Feature feature =
Chris@320: 	    features[m_outputFeatureNo][fi];
Chris@320: 	addFeature(blockFrame, feature);
Chris@320:     }
Chris@320: 
Chris@320:     if (frequencyDomain) {
Chris@320:         for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@320:             delete fftModels[ch];
Chris@320:         }
Chris@320:     }
Chris@320: 
Chris@320:     setCompletion(100);
Chris@320: }
Chris@320: 
Chris@320: void
Chris@363: FeatureExtractionModelTransformer::getFrames(int channelCount,
Chris@363:                                              long startFrame, long size,
Chris@363:                                              float **buffers)
Chris@320: {
Chris@320:     long offset = 0;
Chris@320: 
Chris@320:     if (startFrame < 0) {
Chris@363:         for (int c = 0; c < channelCount; ++c) {
Chris@363:             for (int i = 0; i < size && startFrame + i < 0; ++i) {
Chris@363:                 buffers[c][i] = 0.0f;
Chris@363:             }
Chris@320:         }
Chris@320:         offset = -startFrame;
Chris@320:         size -= offset;
Chris@320:         if (size <= 0) return;
Chris@320:         startFrame = 0;
Chris@320:     }
Chris@320: 
Chris@350:     DenseTimeValueModel *input = getConformingInput();
Chris@350:     if (!input) return;
Chris@363:     
Chris@363:     long got = 0;
Chris@350: 
Chris@363:     if (channelCount == 1) {
Chris@363: 
Chris@363:         got = input->getData(m_input.getChannel(), startFrame, size,
Chris@363:                              buffers[0] + offset);
Chris@363: 
Chris@363:         if (m_input.getChannel() == -1 && input->getChannelCount() > 1) {
Chris@363:             // use mean instead of sum, as plugin input
Chris@363:             float cc = float(input->getChannelCount());
Chris@363:             for (long i = 0; i < size; ++i) {
Chris@363:                 buffers[0][i + offset] /= cc;
Chris@363:             }
Chris@363:         }
Chris@363: 
Chris@363:     } else {
Chris@363: 
Chris@363:         float **writebuf = buffers;
Chris@363:         if (offset > 0) {
Chris@363:             writebuf = new float *[channelCount];
Chris@363:             for (int i = 0; i < channelCount; ++i) {
Chris@363:                 writebuf[i] = buffers[i] + offset;
Chris@363:             }
Chris@363:         }
Chris@363: 
Chris@363:         got = input->getData(0, channelCount-1, startFrame, size, writebuf);
Chris@363: 
Chris@363:         if (writebuf != buffers) delete[] writebuf;
Chris@363:     }
Chris@320: 
Chris@320:     while (got < size) {
Chris@363:         for (int c = 0; c < channelCount; ++c) {
Chris@363:             buffers[c][got + offset] = 0.0;
Chris@363:         }
Chris@320:         ++got;
Chris@320:     }
Chris@320: }
Chris@320: 
Chris@320: void
Chris@331: FeatureExtractionModelTransformer::addFeature(size_t blockFrame,
Chris@320: 					     const Vamp::Plugin::Feature &feature)
Chris@320: {
Chris@350:     size_t inputRate = m_input.getModel()->getSampleRate();
Chris@320: 
Chris@331: //    std::cerr << "FeatureExtractionModelTransformer::addFeature("
Chris@320: //	      << blockFrame << ")" << std::endl;
Chris@320: 
Chris@320:     int binCount = 1;
Chris@320:     if (m_descriptor->hasFixedBinCount) {
Chris@320: 	binCount = m_descriptor->binCount;
Chris@320:     }
Chris@320: 
Chris@320:     size_t frame = blockFrame;
Chris@320: 
Chris@320:     if (m_descriptor->sampleType ==
Chris@320: 	Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320: 
Chris@320: 	if (!feature.hasTimestamp) {
Chris@320: 	    std::cerr
Chris@331: 		<< "WARNING: FeatureExtractionModelTransformer::addFeature: "
Chris@320: 		<< "Feature has variable sample rate but no timestamp!"
Chris@320: 		<< std::endl;
Chris@320: 	    return;
Chris@320: 	} else {
Chris@320: 	    frame = Vamp::RealTime::realTime2Frame(feature.timestamp, inputRate);
Chris@320: 	}
Chris@320: 
Chris@320:     } else if (m_descriptor->sampleType ==
Chris@320: 	       Vamp::Plugin::OutputDescriptor::FixedSampleRate) {
Chris@320: 
Chris@320: 	if (feature.hasTimestamp) {
Chris@320: 	    //!!! warning: sampleRate may be non-integral
Chris@320: 	    frame = Vamp::RealTime::realTime2Frame(feature.timestamp,
Chris@320:                                                    lrintf(m_descriptor->sampleRate));
Chris@320: 	} else {
Chris@320: 	    frame = m_output->getEndFrame();
Chris@320: 	}
Chris@320:     }
Chris@320: 	
Chris@320:     if (binCount == 0) {
Chris@320: 
Chris@350: 	SparseOneDimensionalModel *model =
Chris@350:             getConformingOutput<SparseOneDimensionalModel>();
Chris@320: 	if (!model) return;
Chris@350: 
Chris@320: 	model->addPoint(SparseOneDimensionalModel::Point(frame, feature.label.c_str()));
Chris@320: 	
Chris@320:     } else if (binCount == 1) {
Chris@320: 
Chris@320: 	float value = 0.0;
Chris@320: 	if (feature.values.size() > 0) value = feature.values[0];
Chris@320: 
Chris@350: 	SparseTimeValueModel *model =
Chris@350:             getConformingOutput<SparseTimeValueModel>();
Chris@320: 	if (!model) return;
Chris@350: 
Chris@320: 	model->addPoint(SparseTimeValueModel::Point(frame, value, feature.label.c_str()));
Chris@320: //        std::cerr << "SparseTimeValueModel::addPoint(" << frame << ", " << value << "), " << feature.label.c_str() << std::endl;
Chris@320: 
Chris@320:     } else if (m_descriptor->sampleType == 
Chris@320: 	       Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320: 
Chris@320:         float pitch = 0.0;
Chris@320:         if (feature.values.size() > 0) pitch = feature.values[0];
Chris@320: 
Chris@320:         float duration = 1;
Chris@320:         if (feature.values.size() > 1) duration = feature.values[1];
Chris@320:         
Chris@320:         float velocity = 100;
Chris@320:         if (feature.values.size() > 2) velocity = feature.values[2];
Chris@340:         if (velocity < 0) velocity = 127;
Chris@340:         if (velocity > 127) velocity = 127;
Chris@320: 
Chris@350:         NoteModel *model = getConformingOutput<NoteModel>();
Chris@320:         if (!model) return;
Chris@320: 
Chris@320:         model->addPoint(NoteModel::Point(frame, pitch,
Chris@320:                                          lrintf(duration),
Chris@340:                                          velocity / 127.f,
Chris@320:                                          feature.label.c_str()));
Chris@320: 	
Chris@320:     } else {
Chris@320: 	
Chris@320: 	DenseThreeDimensionalModel::Column values = feature.values;
Chris@320: 	
Chris@320: 	EditableDenseThreeDimensionalModel *model =
Chris@350:             getConformingOutput<EditableDenseThreeDimensionalModel>();
Chris@320: 	if (!model) return;
Chris@320: 
Chris@320: 	model->setColumn(frame / model->getResolution(), values);
Chris@320:     }
Chris@320: }
Chris@320: 
Chris@320: void
Chris@331: FeatureExtractionModelTransformer::setCompletion(int completion)
Chris@320: {
Chris@320:     int binCount = 1;
Chris@320:     if (m_descriptor->hasFixedBinCount) {
Chris@320: 	binCount = m_descriptor->binCount;
Chris@320:     }
Chris@320: 
Chris@331: //    std::cerr << "FeatureExtractionModelTransformer::setCompletion("
Chris@320: //              << completion << ")" << std::endl;
Chris@320: 
Chris@320:     if (binCount == 0) {
Chris@320: 
Chris@350: 	SparseOneDimensionalModel *model =
Chris@350:             getConformingOutput<SparseOneDimensionalModel>();
Chris@320: 	if (!model) return;
Chris@350: 	model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320: 
Chris@320:     } else if (binCount == 1) {
Chris@320: 
Chris@350: 	SparseTimeValueModel *model =
Chris@350:             getConformingOutput<SparseTimeValueModel>();
Chris@320: 	if (!model) return;
Chris@350: 	model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320: 
Chris@320:     } else if (m_descriptor->sampleType ==
Chris@320: 	       Vamp::Plugin::OutputDescriptor::VariableSampleRate) {
Chris@320: 
Chris@350: 	NoteModel *model =
Chris@350:             getConformingOutput<NoteModel>();
Chris@320: 	if (!model) return;
Chris@350: 	model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320: 
Chris@320:     } else {
Chris@320: 
Chris@320: 	EditableDenseThreeDimensionalModel *model =
Chris@350:             getConformingOutput<EditableDenseThreeDimensionalModel>();
Chris@320: 	if (!model) return;
Chris@350: 	model->setCompletion(completion, true); //!!!m_context.updates);
Chris@320:     }
Chris@320: }
Chris@320: