annotate transform/RealTimeEffectModelTransformer.cpp @ 489:82ab61fa9223

* Reorganise our SPARQL queries on the basis that Redland is now a required dependency rather than an optional one. For anything that queries the pool of data about plugins, we use a single datastore and model, initialised at the outset by PluginRDFIndexer and then queried directly; for anything that "reads from a file" (e.g. loading annotations) we query directly using Rasqal, going back to the datastore only when we need additional plugin-related information (see the sketch following this header). This may improve performance, but mostly it simplifies the code and fixes a serious issue with RDF import in previous versions, namely that multiple sequential RDF imports would end up sharing the same RDF data pool!
author Chris Cannam
date Fri, 21 Nov 2008 16:12:29 +0000
parents 6036b38177e2
children b6dc6c7f402c
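
The changeset message above describes a two-path arrangement: one long-lived Redland datastore and model, filled once by PluginRDFIndexer and queried directly for plugin metadata, and a separate per-document query path for files being imported. What follows is a minimal illustrative sketch of that arrangement using the Redland C API (librdf); it is not the actual PluginRDFIndexer or import code, and the function names (initialisePluginPool, indexPluginRDF, queryPluginPool, queryImportedFile) are hypothetical. The real file-reading path in this changeset queries the document with Rasqal; the sketch approximates it with a throwaway per-file model, which preserves the property the changeset relies on: two sequential imports no longer share one data pool.

#include <redland.h>

#include <iostream>
#include <string>

// Shared world, storage and model for the plugin-metadata pool.  In the
// real code this pool is owned and initialised by PluginRDFIndexer.
static librdf_world   *pluginWorld   = 0;
static librdf_storage *pluginStorage = 0;
static librdf_model   *pluginModel   = 0;

static void initialisePluginPool()
{
    pluginWorld   = librdf_new_world();
    librdf_world_open(pluginWorld);
    // Single in-memory triple store holding all plugin-related data
    pluginStorage = librdf_new_storage(pluginWorld, "memory", "plugin-pool", NULL);
    pluginModel   = librdf_new_model(pluginWorld, pluginStorage, NULL);
}

// Parse one plugin RDF description into the shared pool
static bool indexPluginRDF(const std::string &uri)
{
    librdf_uri *source = librdf_new_uri
        (pluginWorld, (const unsigned char *)uri.c_str());
    // "guess" asks the parser to detect the syntax; an explicit name such
    // as "turtle" could be substituted if guessing is unavailable
    librdf_parser *parser = librdf_new_parser(pluginWorld, "guess", NULL, NULL);
    int failed = librdf_parser_parse_into_model(parser, source, NULL,
                                                pluginModel);
    librdf_free_parser(parser);
    librdf_free_uri(source);
    return !failed;
}

// Run a SPARQL query directly against the shared plugin pool and print
// the values bound to the given variable (assumed to be literals)
static void queryPluginPool(const std::string &sparql, const char *binding)
{
    librdf_query *query = librdf_new_query
        (pluginWorld, "sparql", NULL,
         (const unsigned char *)sparql.c_str(), NULL);
    librdf_query_results *results =
        librdf_model_query_execute(pluginModel, query);
    while (results && !librdf_query_results_finished(results)) {
        librdf_node *node =
            librdf_query_results_get_binding_value_by_name(results, binding);
        if (node) {
            unsigned char *value = librdf_node_get_literal_value(node);
            if (value) std::cerr << binding << " = " << value << std::endl;
            librdf_free_node(node);
        }
        librdf_query_results_next(results);
    }
    if (results) librdf_free_query_results(results);
    librdf_free_query(query);
}

// Query a single document to be imported.  Each call gets its own
// temporary model, so imported triples never reach the shared pool
// (the changeset itself queries the file with Rasqal instead).
static void queryImportedFile(const std::string &fileUri,
                              const std::string &sparql)
{
    librdf_storage *storage =
        librdf_new_storage(pluginWorld, "memory", "import", NULL);
    librdf_model *model = librdf_new_model(pluginWorld, storage, NULL);

    librdf_uri *source = librdf_new_uri
        (pluginWorld, (const unsigned char *)fileUri.c_str());
    librdf_parser *parser = librdf_new_parser(pluginWorld, "guess", NULL, NULL);
    librdf_parser_parse_into_model(parser, source, NULL, model);
    librdf_free_parser(parser);
    librdf_free_uri(source);

    librdf_query *query = librdf_new_query
        (pluginWorld, "sparql", NULL,
         (const unsigned char *)sparql.c_str(), NULL);
    librdf_query_results *results = librdf_model_query_execute(model, query);
    // ... read bindings as in queryPluginPool, then consult pluginModel
    // for any additional plugin-related information the document needs ...
    if (results) librdf_free_query_results(results);
    librdf_free_query(query);

    librdf_free_model(model);
    librdf_free_storage(storage);
}

A caller would invoke initialisePluginPool() once at startup, indexPluginRDF() for each plugin description discovered, and queryImportedFile() for each document being imported, keeping imported triples out of the shared pool.
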
rev   line source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "RealTimeEffectModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/RealTimePluginFactory.h"
Chris@320 19 #include "plugin/RealTimePluginInstance.h"
Chris@320 20 #include "plugin/PluginXml.h"
Chris@320 21
Chris@320 22 #include "data/model/Model.h"
Chris@320 23 #include "data/model/SparseTimeValueModel.h"
Chris@320 24 #include "data/model/DenseTimeValueModel.h"
Chris@320 25 #include "data/model/WritableWaveFileModel.h"
Chris@320 26 #include "data/model/WaveFileModel.h"
Chris@320 27
Chris@350 28 #include "TransformFactory.h"
Chris@350 29
Chris@320 30 #include <iostream>
Chris@320 31
Chris@350 32 RealTimeEffectModelTransformer::RealTimeEffectModelTransformer(Input in,
Chris@350 33 const Transform &transform) :
Chris@350 34 ModelTransformer(in, transform),
Chris@350 35 m_plugin(0)
Chris@320 36 {
Chris@350 37 m_units = TransformFactory::getInstance()->getTransformUnits
Chris@350 38 (transform.getIdentifier());
Chris@350 39 m_outputNo =
Chris@350 40 (transform.getOutput() == "A") ? -1 : transform.getOutput().toInt();
Chris@350 41
Chris@350 42 QString pluginId = transform.getPluginIdentifier();
Chris@350 43
Chris@350 44 if (!m_transform.getBlockSize()) m_transform.setBlockSize(1024);
Chris@320 45
Chris@331 46 // std::cerr << "RealTimeEffectModelTransformer::RealTimeEffectModelTransformer: plugin " << pluginId.toStdString() << ", output " << output << std::endl;
Chris@320 47
Chris@320 48 RealTimePluginFactory *factory =
Chris@320 49 RealTimePluginFactory::instanceFor(pluginId);
Chris@320 50
Chris@320 51 if (!factory) {
Chris@331 52 std::cerr << "RealTimeEffectModelTransformer: No factory available for plugin id \""
Chris@320 53 << pluginId.toStdString() << "\"" << std::endl;
Chris@320 54 return;
Chris@320 55 }
Chris@320 56
Chris@350 57 DenseTimeValueModel *input = getConformingInput();
Chris@320 58 if (!input) return;
Chris@320 59
Chris@320 60 m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
Chris@350 61 input->getSampleRate(),
Chris@350 62 m_transform.getBlockSize(),
Chris@320 63 input->getChannelCount());
Chris@320 64
Chris@320 65 if (!m_plugin) {
Chris@331 66 std::cerr << "RealTimeEffectModelTransformer: Failed to instantiate plugin \""
Chris@320 67 << pluginId.toStdString() << "\"" << std::endl;
Chris@320 68 return;
Chris@320 69 }
Chris@320 70
Chris@350 71 TransformFactory::getInstance()->setPluginParameters(m_transform, m_plugin);
Chris@320 72
Chris@320 73 if (m_outputNo >= 0 &&
Chris@320 74 m_outputNo >= int(m_plugin->getControlOutputCount())) {
Chris@331 75 std::cerr << "RealTimeEffectModelTransformer: Plugin has fewer control outputs than the requested output index " << m_outputNo << std::endl;
Chris@320 76 return;
Chris@320 77 }
Chris@320 78
Chris@320 79 if (m_outputNo == -1) {
Chris@320 80
Chris@320 81 size_t outputChannels = m_plugin->getAudioOutputCount();
Chris@320 82 if (outputChannels > input->getChannelCount()) {
Chris@320 83 outputChannels = input->getChannelCount();
Chris@320 84 }
Chris@320 85
Chris@320 86 WritableWaveFileModel *model = new WritableWaveFileModel
Chris@320 87 (input->getSampleRate(), outputChannels);
Chris@320 88
Chris@320 89 m_output = model;
Chris@320 90
Chris@320 91 } else {
Chris@320 92
Chris@320 93 SparseTimeValueModel *model = new SparseTimeValueModel
Chris@350 94 (input->getSampleRate(), m_transform.getBlockSize(), 0.0, 0.0, false);
Chris@320 95
Chris@350 96 if (m_units != "") model->setScaleUnits(m_units);
Chris@320 97
Chris@320 98 m_output = model;
Chris@320 99 }
Chris@320 100 }
Chris@320 101
Chris@331 102 RealTimeEffectModelTransformer::~RealTimeEffectModelTransformer()
Chris@320 103 {
Chris@320 104 delete m_plugin;
Chris@320 105 }
Chris@320 106
Chris@320 107 DenseTimeValueModel *
Chris@350 108 RealTimeEffectModelTransformer::getConformingInput()
Chris@320 109 {
Chris@320 110 DenseTimeValueModel *dtvm =
Chris@320 111 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 112 if (!dtvm) {
Chris@350 113 std::cerr << "RealTimeEffectModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
Chris@320 114 }
Chris@320 115 return dtvm;
Chris@320 116 }
Chris@320 117
Chris@320 118 void
Chris@331 119 RealTimeEffectModelTransformer::run()
Chris@320 120 {
Chris@350 121 DenseTimeValueModel *input = getConformingInput();
Chris@320 122 if (!input) return;
Chris@320 123
Chris@320 124 while (!input->isReady()) {
Chris@331 125 std::cerr << "RealTimeEffectModelTransformer::run: Waiting for input model to be ready..." << std::endl;
Chris@320 126 sleep(1);
Chris@320 127 }
Chris@320 128
Chris@320 129 SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_output);
Chris@320 130 WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_output);
Chris@320 131 if (!stvm && !wwfm) return;
Chris@320 132
Chris@320 133 if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;
Chris@320 134
Chris@320 135 size_t sampleRate = input->getSampleRate();
Chris@320 136 size_t channelCount = input->getChannelCount();
Chris@350 137 if (!wwfm && m_input.getChannel() != -1) channelCount = 1;
Chris@320 138
Chris@320 139 long blockSize = m_plugin->getBufferSize();
Chris@320 140
Chris@320 141 float **inbufs = m_plugin->getAudioInputBuffers();
Chris@320 142
Chris@350 143 long startFrame = m_input.getModel()->getStartFrame();
Chris@350 144 long endFrame = m_input.getModel()->getEndFrame();
Chris@320 145
Chris@350 146 RealTime contextStartRT = m_transform.getStartTime();
Chris@350 147 RealTime contextDurationRT = m_transform.getDuration();
Chris@350 148
Chris@350 149 long contextStart =
Chris@350 150 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 151
Chris@350 152 long contextDuration =
Chris@350 153 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 154
Chris@320 155 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 156 contextStart = startFrame;
Chris@320 157 }
Chris@320 158
Chris@320 159 if (contextDuration == 0) {
Chris@320 160 contextDuration = endFrame - contextStart;
Chris@320 161 }
Chris@320 162 if (contextStart + contextDuration > endFrame) {
Chris@320 163 contextDuration = endFrame - contextStart;
Chris@320 164 }
Chris@320 165
Chris@414 166 if (wwfm) {
Chris@414 167 wwfm->setStartFrame(contextStart);
Chris@414 168 }
Chris@320 169
Chris@320 170 long blockFrame = contextStart;
Chris@320 171
Chris@320 172 long prevCompletion = 0;
Chris@320 173
Chris@320 174 long latency = m_plugin->getLatency();
Chris@320 175
Chris@320 176 while (blockFrame < contextStart + contextDuration + latency &&
Chris@320 177 !m_abandoned) {
Chris@320 178
Chris@320 179 long completion =
Chris@320 180 (((blockFrame - contextStart) / blockSize) * 99) /
Chris@320 181 ((contextDuration) / blockSize);
Chris@320 182
Chris@320 183 long got = 0;
Chris@320 184
Chris@320 185 if (channelCount == 1) {
Chris@320 186 if (inbufs && inbufs[0]) {
Chris@320 187 got = input->getData
Chris@350 188 (m_input.getChannel(), blockFrame, blockSize, inbufs[0]);
Chris@320 189 while (got < blockSize) {
Chris@320 190 inbufs[0][got++] = 0.0;
Chris@320 191 }
Chris@320 192 }
Chris@320 193 for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 194 for (long i = 0; i < blockSize; ++i) {
Chris@320 195 inbufs[ch][i] = inbufs[0][i];
Chris@320 196 }
Chris@320 197 }
Chris@320 198 } else {
Chris@429 199 if (inbufs && inbufs[0]) {
Chris@429 200 got = input->getData(0, channelCount - 1,
Chris@429 201 blockFrame, blockSize,
Chris@429 202 inbufs);
Chris@429 203 while (got < blockSize) {
Chris@429 204 for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@429 205 inbufs[ch][got] = 0.0;
Chris@429 206 }
Chris@429 207 ++got;
Chris@320 208 }
Chris@429 209 }
Chris@320 210 for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 211 for (long i = 0; i < blockSize; ++i) {
Chris@320 212 inbufs[ch][i] = inbufs[ch % channelCount][i];
Chris@320 213 }
Chris@320 214 }
Chris@320 215 }
Chris@320 216
Chris@320 217 /*
Chris@320 218 std::cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels "<< std::endl;
Chris@320 219
Chris@320 220 for (size_t ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 221 std::cerr << "Input channel " << ch << std::endl;
Chris@320 222 for (size_t i = 0; i < 100; ++i) {
Chris@320 223 std::cerr << inbufs[ch][i] << " ";
Chris@320 224 if (isnan(inbufs[ch][i])) {
Chris@320 225 std::cerr << "\n\nWARNING: NaN in audio input" << std::endl;
Chris@320 226 }
Chris@320 227 }
Chris@320 228 }
Chris@320 229 */
Chris@320 230
Chris@320 231 m_plugin->run(Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));
Chris@320 232
Chris@320 233 if (stvm) {
Chris@320 234
Chris@320 235 float value = m_plugin->getControlOutputValue(m_outputNo);
Chris@320 236
Chris@320 237 long pointFrame = blockFrame;
Chris@320 238 if (pointFrame > latency) pointFrame -= latency;
Chris@320 239 else pointFrame = 0;
Chris@320 240
Chris@320 241 stvm->addPoint(SparseTimeValueModel::Point
Chris@320 242 (pointFrame, value, ""));
Chris@320 243
Chris@320 244 } else if (wwfm) {
Chris@320 245
Chris@320 246 float **outbufs = m_plugin->getAudioOutputBuffers();
Chris@320 247
Chris@320 248 if (outbufs) {
Chris@320 249
Chris@320 250 if (blockFrame >= latency) {
Chris@320 251 long writeSize = std::min
Chris@320 252 (blockSize,
Chris@320 253 contextStart + contextDuration + latency - blockFrame);
Chris@320 254 wwfm->addSamples(outbufs, writeSize);
Chris@320 255 } else if (blockFrame + blockSize >= latency) {
Chris@320 256 long offset = latency - blockFrame;
Chris@320 257 long count = blockSize - offset;
Chris@320 258 float **tmp = new float *[channelCount];
Chris@320 259 for (size_t c = 0; c < channelCount; ++c) {
Chris@320 260 tmp[c] = outbufs[c] + offset;
Chris@320 261 }
Chris@320 262 wwfm->addSamples(tmp, count);
Chris@320 263 delete[] tmp;
Chris@320 264 }
Chris@320 265 }
Chris@320 266 }
Chris@320 267
Chris@320 268 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@320 269 if (stvm) stvm->setCompletion(completion);
Chris@320 270 if (wwfm) wwfm->setCompletion(completion);
Chris@320 271 prevCompletion = completion;
Chris@320 272 }
Chris@320 273
Chris@320 274 blockFrame += blockSize;
Chris@320 275 }
Chris@320 276
Chris@320 277 if (m_abandoned) return;
Chris@320 278
Chris@320 279 if (stvm) stvm->setCompletion(100);
Chris@320 280 if (wwfm) wwfm->setCompletion(100);
Chris@320 281 }
Chris@320 282