/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam and QMUL.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "RealTimeEffectModelTransformer.h"

#include "plugin/RealTimePluginFactory.h"
#include "plugin/RealTimePluginInstance.h"
#include "plugin/PluginXml.h"

#include "data/model/Model.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/WritableWaveFileModel.h"
#include "data/model/WaveFileModel.h"

#include "TransformFactory.h"

#include <iostream>

RealTimeEffectModelTransformer::RealTimeEffectModelTransformer(Input in,
                                                               const Transform &t) :
    ModelTransformer(in, t),
    m_plugin(nullptr)
{
    Transform transform(t);

    if (!transform.getBlockSize()) {
        transform.setBlockSize(1024);
        m_transforms[0] = transform;
    }

    m_units = TransformFactory::getInstance()->getTransformUnits
        (transform.getIdentifier());
    m_outputNo =
        (transform.getOutput() == "A") ?
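        // An output id of "A" selects the plugin's audio output (stored
        // here as -1); a numeric id selects that control output instead.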
        -1 : transform.getOutput().toInt();

    QString pluginId = transform.getPluginIdentifier();

    SVDEBUG << "RealTimeEffectModelTransformer::RealTimeEffectModelTransformer: plugin " << pluginId << ", output " << transform.getOutput() << endl;

    RealTimePluginFactory *factory =
        RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
        SVCERR << "RealTimeEffectModelTransformer: No factory available for plugin id \""
               << pluginId << "\"" << endl;
        return;
    }

    auto input = ModelById::getAs<DenseTimeValueModel>(getInputModel());
    if (!input) {
        SVCERR << "RealTimeEffectModelTransformer: Input is absent or of wrong type" << endl;
        return;
    }

    m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
                                          input->getSampleRate(),
                                          transform.getBlockSize(),
                                          input->getChannelCount());

    if (!m_plugin) {
        SVCERR << "RealTimeEffectModelTransformer: Failed to instantiate plugin \""
               << pluginId << "\"" << endl;
        return;
    }

    TransformFactory::getInstance()->setPluginParameters(transform, m_plugin);

    if (m_outputNo >= 0 &&
        m_outputNo >= int(m_plugin->getControlOutputCount())) {
        cerr << "RealTimeEffectModelTransformer: Plugin has fewer than desired " << m_outputNo << " control outputs" << endl;
        return;
    }

    if (m_outputNo == -1) {

        // Audio output: write the processed audio to a wave-file model
        int outputChannels = (int)m_plugin->getAudioOutputCount();
        if (outputChannels > input->getChannelCount()) {
            outputChannels = input->getChannelCount();
        }

        auto model = std::make_shared<WritableWaveFileModel>
            (input->getSampleRate(), outputChannels);

        m_outputs.push_back(ModelById::add(model));

    } else {

        // Control output: record one value per block in a sparse
        // time-value model
        auto model = std::make_shared<SparseTimeValueModel>
            (input->getSampleRate(), transform.getBlockSize(),
             0.0, 0.0, false);
        if (m_units != "") model->setScaleUnits(m_units);

        m_outputs.push_back(ModelById::add(model));
    }
}

RealTimeEffectModelTransformer::~RealTimeEffectModelTransformer()
{
}

void
RealTimeEffectModelTransformer::run()
{
    if (m_outputs.empty()) {
        abandon();
        return;
    }

    bool ready = false;
    while (!ready && !m_abandoned) {
        { // scope so as to release input shared_ptr before sleeping
            auto input = ModelById::getAs<DenseTimeValueModel>(getInputModel());
            if (!input) {
                abandon();
                return;
            }
            ready = input->isReady();
        }
        if (!ready) {
            SVDEBUG << "RealTimeEffectModelTransformer::run: Waiting for input model to be ready..."
                    << endl;
            usleep(500000);
        }
    }
    if (m_abandoned) return;

    sv_samplerate_t sampleRate;
    int channelCount;
    sv_frame_t startFrame;
    sv_frame_t endFrame;

    { // scope so as not to have this borrowed pointer retained around
      // the edges of the process loop
        auto input = ModelById::getAs<DenseTimeValueModel>(getInputModel());
        if (!input) {
            abandon();
            return;
        }

        sampleRate = input->getSampleRate();
        channelCount = input->getChannelCount();
        startFrame = input->getStartFrame();
        endFrame = input->getEndFrame();
    }

    auto stvm = ModelById::getAs<SparseTimeValueModel>(m_outputs[0]);
    auto wwfm = ModelById::getAs<WritableWaveFileModel>(m_outputs[0]);

    if (!stvm && !wwfm) {
        return;
    }

    if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) {
        return;
    }

    if (!wwfm && m_input.getChannel() != -1) channelCount = 1;

    sv_frame_t blockSize = m_plugin->getBufferSize();

    float **inbufs = m_plugin->getAudioInputBuffers();

    Transform transform = m_transforms[0];

    RealTime contextStartRT = transform.getStartTime();
    RealTime contextDurationRT = transform.getDuration();

    sv_frame_t contextStart =
        RealTime::realTime2Frame(contextStartRT, sampleRate);

    sv_frame_t contextDuration =
        RealTime::realTime2Frame(contextDurationRT, sampleRate);

    if (contextStart == 0 || contextStart < startFrame) {
        contextStart = startFrame;
    }

    if (contextDuration == 0) {
        contextDuration = endFrame - contextStart;
    }
    if (contextStart + contextDuration > endFrame) {
        contextDuration = endFrame - contextStart;
    }

    if (wwfm) {
        wwfm->setStartFrame(contextStart);
    }

    sv_frame_t blockFrame = contextStart;

    int prevCompletion = 0;

    sv_frame_t latency = m_plugin->getLatency();

    while (blockFrame < contextStart + contextDuration + latency &&
           !m_abandoned) {

        int completion = int
            ((((blockFrame - contextStart) / blockSize) * 99) /
             (1 + ((contextDuration) / blockSize)));

        sv_frame_t got = 0;

        auto input = ModelById::getAs<DenseTimeValueModel>(getInputModel());
        if (!input) {
            abandon();
            return;
        }

        if (channelCount == 1) {
            if (inbufs && inbufs[0]) {
                auto data = input->getData
                    (m_input.getChannel(), blockFrame, blockSize);
                got = data.size();
                for (sv_frame_t i = 0; i < got; ++i) {
                    inbufs[0][i] = data[i];
                }
                while (got < blockSize) {
                    inbufs[0][got++] = 0.f;
                }
                for (int ch = 1; ch < (int)m_plugin->getAudioInputCount(); ++ch) {
                    for (sv_frame_t i = 0; i < blockSize; ++i) {
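                        // mono input: duplicate channel 0 into the
                        // remaining plugin input channels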
                        inbufs[ch][i] = inbufs[0][i];
                    }
                }
            }
        } else {
            if (inbufs && inbufs[0]) {
                auto data = input->getMultiChannelData
                    (0, channelCount - 1, blockFrame, blockSize);
                if (!data.empty()) got = data[0].size();
                for (int ch = 0; ch < channelCount; ++ch) {
                    for (sv_frame_t i = 0; i < got; ++i) {
                        inbufs[ch][i] = data[ch][i];
                    }
                }
                while (got < blockSize) {
                    for (int ch = 0; ch < channelCount; ++ch) {
                        inbufs[ch][got] = 0.0;
                    }
                    ++got;
                }
                for (int ch = channelCount; ch < (int)m_plugin->getAudioInputCount(); ++ch) {
                    for (sv_frame_t i = 0; i < blockSize; ++i) {
                        inbufs[ch][i] = inbufs[ch % channelCount][i];
                    }
                }
            }
        }

        m_plugin->run(RealTime::frame2RealTime(blockFrame, sampleRate));

        if (stvm) {

            float value = m_plugin->getControlOutputValue(m_outputNo);

            sv_frame_t pointFrame = blockFrame;
            if (pointFrame > latency) pointFrame -= latency;
            else pointFrame = 0;

            stvm->add(Event(pointFrame, value, ""));

        } else if (wwfm) {

            float **outbufs = m_plugin->getAudioOutputBuffers();

            if (outbufs) {

                if (blockFrame >= latency) {
                    sv_frame_t writeSize = std::min
                        (blockSize,
                         contextStart + contextDuration + latency - blockFrame);
                    wwfm->addSamples(outbufs, writeSize);
                } else if (blockFrame + blockSize >= latency) {
                    sv_frame_t offset = latency - blockFrame;
                    sv_frame_t count = blockSize - offset;
                    float **tmp = new float *[channelCount];
                    for (int c = 0; c < channelCount; ++c) {
                        tmp[c] = outbufs[c] + offset;
                    }
                    wwfm->addSamples(tmp, count);
                    delete[] tmp;
                }
            }
        }

        if (blockFrame == contextStart || completion > prevCompletion) {
            // This setCompletion is probably misusing the completion
            // terminology, just as it was for WritableWaveFileModel
            if (stvm) stvm->setCompletion(completion);
            if (wwfm) wwfm->setWriteProportion(completion);
            prevCompletion = completion;
        }

        blockFrame += blockSize;
    }

    if (m_abandoned) return;

    if (stvm) stvm->setCompletion(100);
    if (wwfm) wwfm->writeComplete();
}