svcore: diff plugin/transform/RealTimePluginTransform.cpp @ 320:32e50b620a6c
* Move some things around to facilitate plundering libraries for other
applications without needing to duplicate so much code.
sv/osc -> data/osc
sv/audioio -> audioio
sv/transform -> plugin/transform
sv/document -> document (will rename to framework in next commit)
author | Chris Cannam |
---|---|
date | Wed, 24 Oct 2007 16:34:31 +0000 |
parents | |
children | |
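For illustration, the directory moves listed in the changeset description imply matching include-path updates in any code that uses these headers. The snippet below is a hypothetical client fragment, not part of this commit: the old paths shown in comments are assumptions based on the mapping above, and only the new directory names come from this changeset.

```cpp
// Hypothetical client code illustrating the include-path changes implied by
// the moves above. The exact old paths are assumptions; only the new
// directories (plugin/transform, data/osc) are taken from this changeset.

// #include "transform/RealTimePluginTransform.h"      // before: under sv/transform
#include "plugin/transform/RealTimePluginTransform.h"  // after:  under plugin/transform

// #include "osc/OSCQueue.h"                           // before: under sv/osc
#include "data/osc/OSCQueue.h"                         // after:  under data/osc
```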
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/plugin/transform/RealTimePluginTransform.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,274 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "RealTimePluginTransform.h"
+
+#include "plugin/RealTimePluginFactory.h"
+#include "plugin/RealTimePluginInstance.h"
+#include "plugin/PluginXml.h"
+
+#include "data/model/Model.h"
+#include "data/model/SparseTimeValueModel.h"
+#include "data/model/DenseTimeValueModel.h"
+#include "data/model/WritableWaveFileModel.h"
+#include "data/model/WaveFileModel.h"
+
+#include <iostream>
+
+RealTimePluginTransform::RealTimePluginTransform(Model *inputModel,
+                                                 QString pluginId,
+                                                 const ExecutionContext &context,
+                                                 QString configurationXml,
+                                                 QString units,
+                                                 int output) :
+    PluginTransform(inputModel, context),
+    m_pluginId(pluginId),
+    m_configurationXml(configurationXml),
+    m_units(units),
+    m_plugin(0),
+    m_outputNo(output)
+{
+    if (!m_context.blockSize) m_context.blockSize = 1024;
+
+//    std::cerr << "RealTimePluginTransform::RealTimePluginTransform: plugin " << pluginId.toStdString() << ", output " << output << std::endl;
+
+    RealTimePluginFactory *factory =
+        RealTimePluginFactory::instanceFor(pluginId);
+
+    if (!factory) {
+        std::cerr << "RealTimePluginTransform: No factory available for plugin id \""
+                  << pluginId.toStdString() << "\"" << std::endl;
+        return;
+    }
+
+    DenseTimeValueModel *input = getInput();
+    if (!input) return;
+
+    m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
+                                          m_input->getSampleRate(),
+                                          m_context.blockSize,
+                                          input->getChannelCount());
+
+    if (!m_plugin) {
+        std::cerr << "RealTimePluginTransform: Failed to instantiate plugin \""
+                  << pluginId.toStdString() << "\"" << std::endl;
+        return;
+    }
+
+    if (configurationXml != "") {
+        PluginXml(m_plugin).setParametersFromXml(configurationXml);
+    }
+
+    if (m_outputNo >= 0 &&
+        m_outputNo >= int(m_plugin->getControlOutputCount())) {
+        std::cerr << "RealTimePluginTransform: Plugin has fewer than desired " << m_outputNo << " control outputs" << std::endl;
+        return;
+    }
+
+    if (m_outputNo == -1) {
+
+        size_t outputChannels = m_plugin->getAudioOutputCount();
+        if (outputChannels > input->getChannelCount()) {
+            outputChannels = input->getChannelCount();
+        }
+
+        WritableWaveFileModel *model = new WritableWaveFileModel
+            (input->getSampleRate(), outputChannels);
+
+        m_output = model;
+
+    } else {
+
+        SparseTimeValueModel *model = new SparseTimeValueModel
+            (input->getSampleRate(), m_context.blockSize, 0.0, 0.0, false);
+
+        if (units != "") model->setScaleUnits(units);
+
+        m_output = model;
+    }
+}
+
+RealTimePluginTransform::~RealTimePluginTransform()
+{
+    delete m_plugin;
+}
+
+DenseTimeValueModel *
+RealTimePluginTransform::getInput()
+{
+    DenseTimeValueModel *dtvm =
+        dynamic_cast<DenseTimeValueModel *>(getInputModel());
+    if (!dtvm) {
+        std::cerr << "RealTimePluginTransform::getInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
+    }
+    return dtvm;
+}
+
+void
+RealTimePluginTransform::run()
+{
+    DenseTimeValueModel *input = getInput();
+    if (!input) return;
+
+    while (!input->isReady()) {
+        if (dynamic_cast<WaveFileModel *>(input)) break; // no need to wait
+        std::cerr << "RealTimePluginTransform::run: Waiting for input model to be ready..." << std::endl;
+        sleep(1);
+    }
+
+    SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_output);
+    WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_output);
+    if (!stvm && !wwfm) return;
+
+    if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;
+
+    size_t sampleRate = input->getSampleRate();
+    size_t channelCount = input->getChannelCount();
+    if (!wwfm && m_context.channel != -1) channelCount = 1;
+
+    long blockSize = m_plugin->getBufferSize();
+
+    float **inbufs = m_plugin->getAudioInputBuffers();
+
+    long startFrame = m_input->getStartFrame();
+    long endFrame = m_input->getEndFrame();
+
+    long contextStart = m_context.startFrame;
+    long contextDuration = m_context.duration;
+
+    if (contextStart == 0 || contextStart < startFrame) {
+        contextStart = startFrame;
+    }
+
+    if (contextDuration == 0) {
+        contextDuration = endFrame - contextStart;
+    }
+    if (contextStart + contextDuration > endFrame) {
+        contextDuration = endFrame - contextStart;
+    }
+
+    wwfm->setStartFrame(contextStart);
+
+    long blockFrame = contextStart;
+
+    long prevCompletion = 0;
+
+    long latency = m_plugin->getLatency();
+
+    while (blockFrame < contextStart + contextDuration + latency &&
+           !m_abandoned) {
+
+        long completion =
+            (((blockFrame - contextStart) / blockSize) * 99) /
+            ((contextDuration) / blockSize);
+
+        long got = 0;
+
+        if (channelCount == 1) {
+            if (inbufs && inbufs[0]) {
+                got = input->getData
+                    (m_context.channel, blockFrame, blockSize, inbufs[0]);
+                while (got < blockSize) {
+                    inbufs[0][got++] = 0.0;
+                }
+            }
+            for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
+                for (long i = 0; i < blockSize; ++i) {
+                    inbufs[ch][i] = inbufs[0][i];
+                }
+            }
+        } else {
+            for (size_t ch = 0; ch < channelCount; ++ch) {
+                if (inbufs && inbufs[ch]) {
+                    got = input->getData
+                        (ch, blockFrame, blockSize, inbufs[ch]);
+                    while (got < blockSize) {
+                        inbufs[ch][got++] = 0.0;
+                    }
+                }
+            }
+            for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
+                for (long i = 0; i < blockSize; ++i) {
+                    inbufs[ch][i] = inbufs[ch % channelCount][i];
+                }
+            }
+        }
+
+/*
+        std::cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels "<< std::endl;
+
+        for (size_t ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
+            std::cerr << "Input channel " << ch << std::endl;
+            for (size_t i = 0; i < 100; ++i) {
+                std::cerr << inbufs[ch][i] << " ";
+                if (isnan(inbufs[ch][i])) {
+                    std::cerr << "\n\nWARNING: NaN in audio input" << std::endl;
+                }
+            }
+        }
+*/
+
+        m_plugin->run(Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));
+
+        if (stvm) {
+
+            float value = m_plugin->getControlOutputValue(m_outputNo);
+
+            long pointFrame = blockFrame;
+            if (pointFrame > latency) pointFrame -= latency;
+            else pointFrame = 0;
+
+            stvm->addPoint(SparseTimeValueModel::Point
+                           (pointFrame, value, ""));
+
+        } else if (wwfm) {
+
+            float **outbufs = m_plugin->getAudioOutputBuffers();
+
+            if (outbufs) {
+
+                if (blockFrame >= latency) {
+                    long writeSize = std::min
+                        (blockSize,
+                         contextStart + contextDuration + latency - blockFrame);
+                    wwfm->addSamples(outbufs, writeSize);
+                } else if (blockFrame + blockSize >= latency) {
+                    long offset = latency - blockFrame;
+                    long count = blockSize - offset;
+                    float **tmp = new float *[channelCount];
+                    for (size_t c = 0; c < channelCount; ++c) {
+                        tmp[c] = outbufs[c] + offset;
+                    }
+                    wwfm->addSamples(tmp, count);
+                    delete[] tmp;
+                }
+            }
+        }
+
+        if (blockFrame == contextStart || completion > prevCompletion) {
+            if (stvm) stvm->setCompletion(completion);
+            if (wwfm) wwfm->setCompletion(completion);
+            prevCompletion = completion;
+        }
+
+        blockFrame += blockSize;
+    }
+
+    if (m_abandoned) return;
+
+    if (stvm) stvm->setCompletion(100);
+    if (wwfm) wwfm->setCompletion(100);
+}