/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam and QMUL.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "FeatureExtractionModelTransformer.h"

#include "plugin/FeatureExtractionPluginFactory.h"
#include "plugin/PluginXml.h"
#include "vamp-sdk/Plugin.h"

#include "data/model/Model.h"
#include "base/Window.h"
#include "data/model/SparseOneDimensionalModel.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/EditableDenseThreeDimensionalModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/NoteModel.h"
#include "data/model/FFTModel.h"
#include "data/model/WaveFileModel.h"

#include <QMessageBox>

#include <iostream>

FeatureExtractionModelTransformer::FeatureExtractionModelTransformer(Model *inputModel,
                                                                     QString pluginId,
                                                                     const ExecutionContext &context,
                                                                     QString configurationXml,
                                                                     QString outputName) :
    PluginTransformer(inputModel, context),
    m_plugin(0),
    m_descriptor(0),
    m_outputFeatureNo(0)
{
//    std::cerr << "FeatureExtractionModelTransformer::FeatureExtractionModelTransformer: plugin " << pluginId.toStdString() << ", outputName " << outputName.toStdString() << std::endl;

    FeatureExtractionPluginFactory *factory =
        FeatureExtractionPluginFactory::instanceFor(pluginId);

    if (!factory) {
        std::cerr << "FeatureExtractionModelTransformer: No factory available for plugin id \""
                  << pluginId.toStdString() << "\"" << std::endl;
        return;
    }

    m_plugin = factory->instantiatePlugin(pluginId, m_input->getSampleRate());

    if (!m_plugin) {
        std::cerr << "FeatureExtractionModelTransformer: Failed to instantiate plugin \""
                  << pluginId.toStdString() << "\"" << std::endl;
        return;
    }

    if (configurationXml != "") {
        PluginXml(m_plugin).setParametersFromXml(configurationXml);
    }

    DenseTimeValueModel *input = getInput();
    if (!input) return;

    size_t channelCount = input->getChannelCount();
    if (m_plugin->getMaxChannelCount() < channelCount) {
        channelCount = 1;
    }
    if (m_plugin->getMinChannelCount() > channelCount) {
        std::cerr << "FeatureExtractionModelTransformer:: "
                  << "Can't provide enough channels to plugin (plugin min "
                  << m_plugin->getMinChannelCount() << ", max "
                  << m_plugin->getMaxChannelCount() << ", input model has "
                  << input->getChannelCount() << ")" << std::endl;
        return;
    }
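    // Initialise the plugin with the channel count agreed above and the
    // step and block sizes carried in the execution context.  Note that
    // there is no fallback here to the plugin's preferred step and block
    // sizes: if it refuses the values we ask for, we simply give up.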
    std::cerr << "Initialising feature extraction plugin with channels = "
              << channelCount << ", step = " << m_context.stepSize
              << ", block = " << m_context.blockSize << std::endl;

    if (!m_plugin->initialise(channelCount,
                              m_context.stepSize,
                              m_context.blockSize)) {
        std::cerr << "FeatureExtractionModelTransformer: Plugin "
                  << m_plugin->getIdentifier() << " failed to initialise!" << std::endl;
        return;
    }

    Vamp::Plugin::OutputList outputs = m_plugin->getOutputDescriptors();

    if (outputs.empty()) {
        std::cerr << "FeatureExtractionModelTransformer: Plugin \""
                  << pluginId.toStdString() << "\" has no outputs" << std::endl;
        return;
    }

    for (size_t i = 0; i < outputs.size(); ++i) {
        if (outputName == "" || outputs[i].identifier == outputName.toStdString()) {
            m_outputFeatureNo = i;
            m_descriptor = new Vamp::Plugin::OutputDescriptor(outputs[i]);
            break;
        }
    }

    if (!m_descriptor) {
        std::cerr << "FeatureExtractionModelTransformer: Plugin \""
                  << pluginId.toStdString() << "\" has no output named \""
                  << outputName.toStdString() << "\"" << std::endl;
        return;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output sample type "
//              << m_descriptor->sampleType << std::endl;

    int binCount = 1;
    float minValue = 0.0, maxValue = 0.0;
    bool haveExtents = false;

    if (m_descriptor->hasFixedBinCount) {
        binCount = m_descriptor->binCount;
    }

//    std::cerr << "FeatureExtractionModelTransformer: output bin count "
//              << binCount << std::endl;

    if (binCount > 0 && m_descriptor->hasKnownExtents) {
        minValue = m_descriptor->minValue;
        maxValue = m_descriptor->maxValue;
        haveExtents = true;
    }

    size_t modelRate = m_input->getSampleRate();
    size_t modelResolution = 1;

    switch (m_descriptor->sampleType) {

    case Vamp::Plugin::OutputDescriptor::VariableSampleRate:
        if (m_descriptor->sampleRate != 0.0) {
            modelResolution = size_t(modelRate / m_descriptor->sampleRate + 0.001);
        }
        break;

    case Vamp::Plugin::OutputDescriptor::OneSamplePerStep:
        modelResolution = m_context.stepSize;
        break;

    case Vamp::Plugin::OutputDescriptor::FixedSampleRate:
        modelRate = size_t(m_descriptor->sampleRate + 0.001);
        break;
    }
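    // Choose an output model type to match the shape of the selected output:
    // no value bins -> a sparse one-dimensional (instants) model; a single
    // bin -> a sparse time/value model; multiple bins at a variable sample
    // rate -> a note model (see the comment below); anything else -> an
    // editable dense three-dimensional (grid) model.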
    if (binCount == 0) {

        m_output = new SparseOneDimensionalModel(modelRate, modelResolution,
                                                 false);

    } else if (binCount == 1) {

        SparseTimeValueModel *model;
        if (haveExtents) {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new SparseTimeValueModel
                (modelRate, modelResolution, false);
        }
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        // We don't have a sparse 3D model, so interpret this as a
        // note model.  There's nothing to define which values to use
        // as which parameters of the note -- for the moment let's
        // treat the first as pitch, second as duration in frames,
        // third (if present) as velocity.  (Our note model doesn't
        // yet store velocity.)
        //!!! todo: ask the user!

        NoteModel *model;
        if (haveExtents) {
            model = new NoteModel
                (modelRate, modelResolution, minValue, maxValue, false);
        } else {
            model = new NoteModel
                (modelRate, modelResolution, false);
        }
        model->setScaleUnits(outputs[m_outputFeatureNo].unit.c_str());

        m_output = model;

    } else {

        EditableDenseThreeDimensionalModel *model =
            new EditableDenseThreeDimensionalModel
            (modelRate, modelResolution, binCount, false);

        if (!m_descriptor->binNames.empty()) {
            std::vector<QString> names;
            for (size_t i = 0; i < m_descriptor->binNames.size(); ++i) {
                names.push_back(m_descriptor->binNames[i].c_str());
            }
            model->setBinNames(names);
        }

        m_output = model;
    }

    if (m_output) m_output->setSourceModel(m_input);
}

FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()
{
    std::cerr << "FeatureExtractionModelTransformer::~FeatureExtractionModelTransformer()" << std::endl;
    delete m_plugin;
    delete m_descriptor;
}

DenseTimeValueModel *
FeatureExtractionModelTransformer::getInput()
{
    DenseTimeValueModel *dtvm =
        dynamic_cast<DenseTimeValueModel *>(getInputModel());
    if (!dtvm) {
        std::cerr << "FeatureExtractionModelTransformer::getInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
    }
    return dtvm;
}

void
FeatureExtractionModelTransformer::run()
{
    DenseTimeValueModel *input = getInput();
    if (!input) return;

    if (!m_output) return;

    while (!input->isReady()) {
/*
        if (dynamic_cast<WaveFileModel *>(input)) {
            std::cerr << "FeatureExtractionModelTransformer::run: Model is not ready, but it's not a WaveFileModel (it's a " << typeid(input).name() << "), so that's OK" << std::endl;
            sleep(2);
            break; // no need to wait
        }
*/
        std::cerr << "FeatureExtractionModelTransformer::run: Waiting for input model to be ready..."
                  << std::endl;
        sleep(1);
    }

    size_t sampleRate = m_input->getSampleRate();

    size_t channelCount = input->getChannelCount();
    if (m_plugin->getMaxChannelCount() < channelCount) {
        channelCount = 1;
    }
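    // Allocate one processing buffer per channel.  The extra two floats
    // leave room for the frequency-domain case, in which each buffer holds
    // blockSize/2 + 1 interleaved real/imaginary pairs read from the FFT
    // models created below, rather than blockSize time-domain samples.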
    float **buffers = new float*[channelCount];
    for (size_t ch = 0; ch < channelCount; ++ch) {
        buffers[ch] = new float[m_context.blockSize + 2];
    }

    bool frequencyDomain = (m_plugin->getInputDomain() ==
                            Vamp::Plugin::FrequencyDomain);
    std::vector<FFTModel *> fftModels;

    if (frequencyDomain) {
        for (size_t ch = 0; ch < channelCount; ++ch) {
            FFTModel *model = new FFTModel
                (getInput(),
                 channelCount == 1 ? m_context.channel : ch,
                 m_context.windowType,
                 m_context.blockSize,
                 m_context.stepSize,
                 m_context.blockSize,
                 false,
                 StorageAdviser::PrecisionCritical);
            if (!model->isOK()) {
                QMessageBox::critical
                    (0, tr("FFT cache failed"),
                     tr("Failed to create the FFT model for this transform.\n"
                        "There may be insufficient memory or disc space to continue."));
                delete model;
                setCompletion(100);
                return;
            }
            model->resume();
            fftModels.push_back(model);
        }
    }

    long startFrame = m_input->getStartFrame();
    long endFrame = m_input->getEndFrame();

    long contextStart = m_context.startFrame;
    long contextDuration = m_context.duration;

    if (contextStart == 0 || contextStart < startFrame) {
        contextStart = startFrame;
    }

    if (contextDuration == 0) {
        contextDuration = endFrame - contextStart;
    }
    if (contextStart + contextDuration > endFrame) {
        contextDuration = endFrame - contextStart;
    }

    long blockFrame = contextStart;

    long prevCompletion = 0;

    setCompletion(0);

    while (!m_abandoned) {

        if (frequencyDomain) {
            if (blockFrame - int(m_context.blockSize)/2 >
                contextStart + contextDuration) break;
        } else {
            if (blockFrame >=
                contextStart + contextDuration) break;
        }

//        std::cerr << "FeatureExtractionModelTransformer::run: blockFrame "
//                  << blockFrame << ", endFrame " << endFrame << ", blockSize "
//                  << m_context.blockSize << std::endl;

        long completion =
            (((blockFrame - contextStart) / m_context.stepSize) * 99) /
            (contextDuration / m_context.stepSize);

        // channelCount is either m_input->channelCount or 1

        for (size_t ch = 0; ch < channelCount; ++ch) {
            if (frequencyDomain) {
                int column = (blockFrame - startFrame) / m_context.stepSize;
                for (size_t i = 0; i <= m_context.blockSize/2; ++i) {
                    fftModels[ch]->getValuesAt
                        (column, i, buffers[ch][i*2], buffers[ch][i*2+1]);
                }
            } else {
                getFrames(ch, channelCount,
                          blockFrame, m_context.blockSize, buffers[ch]);
            }
        }
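        // Hand the block to the plugin, timestamped with the block's start
        // frame, and add each feature it returns for the selected output to
        // the output model.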
        Vamp::Plugin::FeatureSet features = m_plugin->process
            (buffers, Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));

        for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) {
            Vamp::Plugin::Feature feature =
                features[m_outputFeatureNo][fi];
            addFeature(blockFrame, feature);
        }

        if (blockFrame == contextStart || completion > prevCompletion) {
            setCompletion(completion);
            prevCompletion = completion;
        }

        blockFrame += m_context.stepSize;
    }

    if (m_abandoned) return;

    Vamp::Plugin::FeatureSet features = m_plugin->getRemainingFeatures();

    for (size_t fi = 0; fi < features[m_outputFeatureNo].size(); ++fi) {
        Vamp::Plugin::Feature feature =
            features[m_outputFeatureNo][fi];
        addFeature(blockFrame, feature);
    }

    if (frequencyDomain) {
        for (size_t ch = 0; ch < channelCount; ++ch) {
            delete fftModels[ch];
        }
    }

    setCompletion(100);
}

void
FeatureExtractionModelTransformer::getFrames(int channel, int channelCount,
                                             long startFrame, long size,
                                             float *buffer)
{
    long offset = 0;

    if (startFrame < 0) {
        for (int i = 0; i < size && startFrame + i < 0; ++i) {
            buffer[i] = 0.0f;
        }
        offset = -startFrame;
        size -= offset;
        if (size <= 0) return;
        startFrame = 0;
    }

    long got = getInput()->getData
        ((channelCount == 1 ? m_context.channel : channel),
         startFrame, size, buffer + offset);

    while (got < size) {
        buffer[offset + got] = 0.0;
        ++got;
    }

    if (m_context.channel == -1 && channelCount == 1 &&
        getInput()->getChannelCount() > 1) {
        // use mean instead of sum, as plugin input
        int cc = getInput()->getChannelCount();
        for (long i = 0; i < size; ++i) {
            buffer[i] /= cc;
        }
    }
}
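// addFeature() decides where each returned feature belongs in the output
// model: variable-sample-rate outputs must carry their own timestamps
// (features without one are dropped with a warning); fixed-sample-rate
// outputs use the timestamp if present and are otherwise appended after the
// previous feature; all other outputs are placed at the frame of the block
// that produced them.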
void
FeatureExtractionModelTransformer::addFeature(size_t blockFrame,
                                              const Vamp::Plugin::Feature &feature)
{
    size_t inputRate = m_input->getSampleRate();

//    std::cerr << "FeatureExtractionModelTransformer::addFeature("
//              << blockFrame << ")" << std::endl;

    int binCount = 1;
    if (m_descriptor->hasFixedBinCount) {
        binCount = m_descriptor->binCount;
    }

    size_t frame = blockFrame;

    if (m_descriptor->sampleType ==
        Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        if (!feature.hasTimestamp) {
            std::cerr
                << "WARNING: FeatureExtractionModelTransformer::addFeature: "
                << "Feature has variable sample rate but no timestamp!"
                << std::endl;
            return;
        } else {
            frame = Vamp::RealTime::realTime2Frame(feature.timestamp, inputRate);
        }

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::FixedSampleRate) {

        if (feature.hasTimestamp) {
            //!!! warning: sampleRate may be non-integral
            frame = Vamp::RealTime::realTime2Frame(feature.timestamp,
                                                   lrintf(m_descriptor->sampleRate));
        } else {
            frame = m_output->getEndFrame();
        }
    }

    if (binCount == 0) {

        SparseOneDimensionalModel *model = getOutput<SparseOneDimensionalModel>();
        if (!model) return;
        model->addPoint(SparseOneDimensionalModel::Point(frame, feature.label.c_str()));

    } else if (binCount == 1) {

        float value = 0.0;
        if (feature.values.size() > 0) value = feature.values[0];

        SparseTimeValueModel *model = getOutput<SparseTimeValueModel>();
        if (!model) return;
        model->addPoint(SparseTimeValueModel::Point(frame, value, feature.label.c_str()));
//        std::cerr << "SparseTimeValueModel::addPoint(" << frame << ", " << value << "), " << feature.label.c_str() << std::endl;

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        float pitch = 0.0;
        if (feature.values.size() > 0) pitch = feature.values[0];

        float duration = 1;
        if (feature.values.size() > 1) duration = feature.values[1];

        float velocity = 100;
        if (feature.values.size() > 2) velocity = feature.values[2];

        NoteModel *model = getOutput<NoteModel>();
        if (!model) return;

        model->addPoint(NoteModel::Point(frame, pitch,
                                         lrintf(duration),
                                         feature.label.c_str()));

    } else {

        DenseThreeDimensionalModel::Column values = feature.values;

        EditableDenseThreeDimensionalModel *model =
            getOutput<EditableDenseThreeDimensionalModel>();
        if (!model) return;

        model->setColumn(frame / model->getResolution(), values);
    }
}

void
FeatureExtractionModelTransformer::setCompletion(int completion)
{
    int binCount = 1;
    if (m_descriptor->hasFixedBinCount) {
        binCount = m_descriptor->binCount;
    }

//    std::cerr << "FeatureExtractionModelTransformer::setCompletion("
//              << completion << ")" << std::endl;

    if (binCount == 0) {

        SparseOneDimensionalModel *model = getOutput<SparseOneDimensionalModel>();
        if (!model) return;
        model->setCompletion(completion, m_context.updates);

    } else if (binCount == 1) {

        SparseTimeValueModel *model = getOutput<SparseTimeValueModel>();
        if (!model) return;
        model->setCompletion(completion, m_context.updates);

    } else if (m_descriptor->sampleType ==
               Vamp::Plugin::OutputDescriptor::VariableSampleRate) {

        NoteModel *model = getOutput<NoteModel>();
        if (!model) return;
        model->setCompletion(completion, m_context.updates);

    } else {

        EditableDenseThreeDimensionalModel *model =
            getOutput<EditableDenseThreeDimensionalModel>();
        if (!model) return;
        model->setCompletion(completion, m_context.updates);
    }
}