/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam and QMUL.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "RealTimePluginTransform.h"

#include "plugin/RealTimePluginFactory.h"
#include "plugin/RealTimePluginInstance.h"
#include "plugin/PluginXml.h"

#include "data/model/Model.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/WritableWaveFileModel.h"
#include "data/model/WaveFileModel.h"

#include <iostream>
#include <algorithm>

RealTimePluginTransform::RealTimePluginTransform(Model *inputModel,
                                                 QString pluginId,
                                                 const ExecutionContext &context,
                                                 QString configurationXml,
                                                 QString units,
                                                 int output) :
    PluginTransform(inputModel, context),
    m_pluginId(pluginId),
    m_configurationXml(configurationXml),
    m_units(units),
    m_plugin(0),
    m_outputNo(output)
{
    if (!m_context.blockSize) m_context.blockSize = 1024;

//    std::cerr << "RealTimePluginTransform::RealTimePluginTransform: plugin " << pluginId.toStdString() << ", output " << output << std::endl;

    RealTimePluginFactory *factory =
        RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
        std::cerr << "RealTimePluginTransform: No factory available for plugin id \""
                  << pluginId.toStdString() << "\"" << std::endl;
        return;
    }

    DenseTimeValueModel *input = getInput();
    if (!input) return;

    m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
                                          m_input->getSampleRate(),
                                          m_context.blockSize,
                                          input->getChannelCount());

    if (!m_plugin) {
        std::cerr << "RealTimePluginTransform: Failed to instantiate plugin \""
                  << pluginId.toStdString() << "\"" << std::endl;
        return;
    }

    if (configurationXml != "") {
        PluginXml(m_plugin).setParametersFromXml(configurationXml);
    }

    if (m_outputNo >= 0 &&
        m_outputNo >= int(m_plugin->getControlOutputCount())) {
        std::cerr << "RealTimePluginTransform: Plugin has no control output number " << m_outputNo << std::endl;
        return;
    }

    if (m_outputNo == -1) {

        // Audio output requested: render into a writable wave-file model,
        // using no more channels than the input provides.
        size_t outputChannels = m_plugin->getAudioOutputCount();
        if (outputChannels > input->getChannelCount()) {
            outputChannels = input->getChannelCount();
        }

        WritableWaveFileModel *model = new WritableWaveFileModel
            (input->getSampleRate(), outputChannels);

        m_output = model;

    } else {

        // Control output requested: record one value per processing block
        // in a sparse time-value model.
        SparseTimeValueModel *model = new SparseTimeValueModel
            (input->getSampleRate(), m_context.blockSize, 0.0, 0.0, false);

        if (units != "") model->setScaleUnits(units);

        m_output = model;
    }
}

RealTimePluginTransform::~RealTimePluginTransform()
{
    delete m_plugin;
}

DenseTimeValueModel *
RealTimePluginTransform::getInput()
{
    DenseTimeValueModel *dtvm =
        dynamic_cast<DenseTimeValueModel *>(getInputModel());
    if (!dtvm) {
        std::cerr << "RealTimePluginTransform::getInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
    }
    return dtvm;
}

void
RealTimePluginTransform::run()
{
    DenseTimeValueModel *input = getInput();
    if (!input) return;

    if (!m_plugin) return; // instantiation failed in the constructor

    while (!input->isReady()) {
        if (dynamic_cast<WaveFileModel *>(input)) break; // no need to wait
        std::cerr << "RealTimePluginTransform::run: Waiting for input model to be ready..." << std::endl;
        sleep(1);
    }

    SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_output);
    WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_output);
    if (!stvm && !wwfm) return;

    if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;

    size_t sampleRate = input->getSampleRate();
    size_t channelCount = input->getChannelCount();
    if (!wwfm && m_context.channel != -1) channelCount = 1;

    long blockSize = m_plugin->getBufferSize();

    float **inbufs = m_plugin->getAudioInputBuffers();

    long startFrame = m_input->getStartFrame();
    long endFrame = m_input->getEndFrame();

    long contextStart = m_context.startFrame;
    long contextDuration = m_context.duration;

    if (contextStart == 0 || contextStart < startFrame) {
        contextStart = startFrame;
    }

    if (contextDuration == 0) {
        contextDuration = endFrame - contextStart;
    }
    if (contextStart + contextDuration > endFrame) {
        contextDuration = endFrame - contextStart;
    }

    if (wwfm) wwfm->setStartFrame(contextStart);

    long blockFrame = contextStart;

    long prevCompletion = 0;

    long latency = m_plugin->getLatency();

    // Process the whole selection plus the plugin's latency, so that the
    // latency-compensated output covers the full requested duration.
    while (blockFrame < contextStart + contextDuration + latency &&
           !m_abandoned) {

        // Guard against a zero divisor for selections shorter than one block
        long totalBlocks = contextDuration / blockSize;
        if (totalBlocks < 1) totalBlocks = 1;

        long completion =
            (((blockFrame - contextStart) / blockSize) * 99) / totalBlocks;

        long got = 0;

        // Fill the plugin's input buffers, zero-padding any short read and
        // duplicating channels if the plugin wants more inputs than we have.
        if (channelCount == 1) {
            if (inbufs && inbufs[0]) {
                got = input->getData
                    (m_context.channel, blockFrame, blockSize, inbufs[0]);
                while (got < blockSize) {
                    inbufs[0][got++] = 0.0;
                }
            }
            for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
                for (long i = 0; i < blockSize; ++i) {
                    inbufs[ch][i] = inbufs[0][i];
                }
            }
        } else {
            for (size_t ch = 0; ch < channelCount; ++ch) {
                if (inbufs && inbufs[ch]) {
                    got = input->getData
                        (ch, blockFrame, blockSize, inbufs[ch]);
                    while (got < blockSize) {
                        inbufs[ch][got++] = 0.0;
                    }
                }
            }
            for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
                for (long i = 0; i < blockSize; ++i) {
                    inbufs[ch][i] = inbufs[ch % channelCount][i];
                }
            }
        }

/*
        std::cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels " << std::endl;

        for (size_t ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
            std::cerr << "Input channel " << ch << std::endl;
            for (size_t i = 0; i < 100; ++i) {
                std::cerr << inbufs[ch][i] << " ";
                if (isnan(inbufs[ch][i])) {
                    std::cerr << "\n\nWARNING: NaN in audio input" << std::endl;
                }
            }
        }
*/

        m_plugin->run(Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));

        if (stvm) {

            // Collect one control-output value per block, shifted back by
            // the plugin's latency.
            float value = m_plugin->getControlOutputValue(m_outputNo);

            long pointFrame = blockFrame;
            if (pointFrame > latency) pointFrame -= latency;
            else pointFrame = 0;

            stvm->addPoint(SparseTimeValueModel::Point
                           (pointFrame, value, ""));

        } else if (wwfm) {

            float **outbufs = m_plugin->getAudioOutputBuffers();

            if (outbufs) {

                // Discard the first `latency` frames of audio output so that
                // the written samples line up with the input.
                if (blockFrame >= latency) {
                    long writeSize = std::min
                        (blockSize,
                         contextStart + contextDuration + latency - blockFrame);
                    wwfm->addSamples(outbufs, writeSize);
                } else if (blockFrame + blockSize >= latency) {
                    long offset = latency - blockFrame;
                    long count = blockSize - offset;
                    float **tmp = new float *[channelCount];
                    for (size_t c = 0; c < channelCount; ++c) {
                        tmp[c] = outbufs[c] + offset;
                    }
                    wwfm->addSamples(tmp, count);
                    delete[] tmp;
                }
            }
        }

        if (blockFrame == contextStart || completion > prevCompletion) {
            if (stvm) stvm->setCompletion(completion);
            if (wwfm) wwfm->setCompletion(completion);
            prevCompletion = completion;
        }

        blockFrame += blockSize;
    }

    if (m_abandoned) return;

    if (stvm) stvm->setCompletion(100);
    if (wwfm) wwfm->setCompletion(100);
}