annotate transform/RealTimeEffectModelTransformer.cpp @ 558:1d7ebc05157e

* Some fairly simplistic code to set up layer type properties based on RDF data about feature types (both when running transforms and when importing features from RDF files).
author Chris Cannam
date Thu, 12 Feb 2009 15:26:43 +0000
parents b6dc6c7f402c
children f84f147572b9 b4a8d8221eaf
rev   line source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "RealTimeEffectModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/RealTimePluginFactory.h"
Chris@320 19 #include "plugin/RealTimePluginInstance.h"
Chris@320 20 #include "plugin/PluginXml.h"
Chris@320 21
Chris@320 22 #include "data/model/Model.h"
Chris@320 23 #include "data/model/SparseTimeValueModel.h"
Chris@320 24 #include "data/model/DenseTimeValueModel.h"
Chris@320 25 #include "data/model/WritableWaveFileModel.h"
Chris@320 26 #include "data/model/WaveFileModel.h"
Chris@320 27
Chris@350 28 #include "TransformFactory.h"
Chris@350 29
Chris@320 30 #include <iostream>
#include <unistd.h> // for usleep()
Chris@320 31
Chris@350 32 RealTimeEffectModelTransformer::RealTimeEffectModelTransformer(Input in,
Chris@350 33 const Transform &transform) :
Chris@350 34 ModelTransformer(in, transform),
Chris@350 35 m_plugin(0)
Chris@320 36 {
Chris@350 37 m_units = TransformFactory::getInstance()->getTransformUnits
Chris@350 38 (transform.getIdentifier());
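// The transform's output identifier selects what we produce: "A"
// means the plugin's processed audio, anything else is parsed as
// the index of one of the plugin's control outputs.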
Chris@350 39 m_outputNo =
Chris@350 40 (transform.getOutput() == "A") ? -1 : transform.getOutput().toInt();
Chris@350 41
Chris@350 42 QString pluginId = transform.getPluginIdentifier();
Chris@350 43
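// Fall back to a default block size of 1024 frames if the transform
// does not specify one.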
Chris@350 44 if (!m_transform.getBlockSize()) m_transform.setBlockSize(1024);
Chris@320 45
Chris@331 46 // std::cerr << "RealTimeEffectModelTransformer::RealTimeEffectModelTransformer: plugin " << pluginId.toStdString() << ", output " << m_outputNo << std::endl;
Chris@320 47
Chris@320 48 RealTimePluginFactory *factory =
Chris@320 49 RealTimePluginFactory::instanceFor(pluginId);
Chris@320 50
Chris@320 51 if (!factory) {
Chris@331 52 std::cerr << "RealTimeEffectModelTransformer: No factory available for plugin id \""
Chris@320 53 << pluginId.toStdString() << "\"" << std::endl;
Chris@320 54 return;
Chris@320 55 }
Chris@320 56
Chris@350 57 DenseTimeValueModel *input = getConformingInput();
Chris@320 58 if (!input) return;
Chris@320 59
Chris@320 60 m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
Chris@350 61 input->getSampleRate(),
Chris@350 62 m_transform.getBlockSize(),
Chris@320 63 input->getChannelCount());
Chris@320 64
Chris@320 65 if (!m_plugin) {
Chris@331 66 std::cerr << "RealTimeEffectModelTransformer: Failed to instantiate plugin \""
Chris@320 67 << pluginId.toStdString() << "\"" << std::endl;
Chris@320 68 return;
Chris@320 69 }
Chris@320 70
Chris@350 71 TransformFactory::getInstance()->setPluginParameters(m_transform, m_plugin);
Chris@320 72
Chris@320 73 if (m_outputNo >= 0 &&
Chris@320 74 m_outputNo >= int(m_plugin->getControlOutputCount())) {
Chris@331 75 std::cerr << "RealTimeEffectModelTransformer: Plugin has too few control outputs (requested control output " << m_outputNo << ")" << std::endl;
Chris@320 76 return;
Chris@320 77 }
Chris@320 78
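// Create the output model: for audio output ("A"), a writable wave-file
// model whose channel count is the plugin's audio output count capped at
// the input's channel count; for a control output, a sparse time-value
// model with one point per processing block.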
Chris@320 79 if (m_outputNo == -1) {
Chris@320 80
Chris@320 81 size_t outputChannels = m_plugin->getAudioOutputCount();
Chris@320 82 if (outputChannels > input->getChannelCount()) {
Chris@320 83 outputChannels = input->getChannelCount();
Chris@320 84 }
Chris@320 85
Chris@320 86 WritableWaveFileModel *model = new WritableWaveFileModel
Chris@320 87 (input->getSampleRate(), outputChannels);
Chris@320 88
Chris@320 89 m_output = model;
Chris@320 90
Chris@320 91 } else {
Chris@320 92
Chris@320 93 SparseTimeValueModel *model = new SparseTimeValueModel
Chris@350 94 (input->getSampleRate(), m_transform.getBlockSize(), 0.0, 0.0, false);
Chris@320 95
Chris@350 96 if (m_units != "") model->setScaleUnits(m_units);
Chris@320 97
Chris@320 98 m_output = model;
Chris@320 99 }
Chris@320 100 }
Chris@320 101
Chris@331 102 RealTimeEffectModelTransformer::~RealTimeEffectModelTransformer()
Chris@320 103 {
Chris@320 104 delete m_plugin;
Chris@320 105 }
Chris@320 106
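// The input must be a dense time-value (i.e. audio) model; any other
// model type cannot be fed through a real-time effect plugin.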
Chris@320 107 DenseTimeValueModel *
Chris@350 108 RealTimeEffectModelTransformer::getConformingInput()
Chris@320 109 {
Chris@320 110 DenseTimeValueModel *dtvm =
Chris@320 111 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 112 if (!dtvm) {
Chris@350 113 std::cerr << "RealTimeEffectModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
Chris@320 114 }
Chris@320 115 return dtvm;
Chris@320 116 }
Chris@320 117
Chris@320 118 void
Chris@331 119 RealTimeEffectModelTransformer::run()
Chris@320 120 {
Chris@350 121 DenseTimeValueModel *input = getConformingInput();
Chris@320 122 if (!input) return;
Chris@320 123
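// Wait for the input model to finish loading, polling every half
// second, unless the transform is abandoned in the meantime.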
Chris@497 124 while (!input->isReady() && !m_abandoned) {
Chris@331 125 std::cerr << "RealTimeEffectModelTransformer::run: Waiting for input model to be ready..." << std::endl;
Chris@497 126 usleep(500000);
Chris@320 127 }
Chris@497 128 if (m_abandoned) return;
Chris@320 129
Chris@320 130 SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_output);
Chris@320 131 WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_output);
Chris@320 132 if (!stvm && !wwfm) return;
Chris@320 133
Chris@320 134 if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;
Chris@320 135
Chris@320 136 size_t sampleRate = input->getSampleRate();
Chris@320 137 size_t channelCount = input->getChannelCount();
Chris@350 138 if (!wwfm && m_input.getChannel() != -1) channelCount = 1;
Chris@320 139
Chris@320 140 long blockSize = m_plugin->getBufferSize();
Chris@320 141
Chris@320 142 float **inbufs = m_plugin->getAudioInputBuffers();
Chris@320 143
Chris@350 144 long startFrame = m_input.getModel()->getStartFrame();
Chris@350 145 long endFrame = m_input.getModel()->getEndFrame();
Chris@320 146
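// Work out the processing context: the transform's start time and
// duration converted to frames and clamped to the extent of the input
// model.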
Chris@350 147 RealTime contextStartRT = m_transform.getStartTime();
Chris@350 148 RealTime contextDurationRT = m_transform.getDuration();
Chris@350 149
Chris@350 150 long contextStart =
Chris@350 151 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 152
Chris@350 153 long contextDuration =
Chris@350 154 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 155
Chris@320 156 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 157 contextStart = startFrame;
Chris@320 158 }
Chris@320 159
Chris@320 160 if (contextDuration == 0) {
Chris@320 161 contextDuration = endFrame - contextStart;
Chris@320 162 }
Chris@320 163 if (contextStart + contextDuration > endFrame) {
Chris@320 164 contextDuration = endFrame - contextStart;
Chris@320 165 }
Chris@320 166
Chris@414 167 if (wwfm) {
Chris@414 168 wwfm->setStartFrame(contextStart);
Chris@414 169 }
Chris@320 170
Chris@320 171 long blockFrame = contextStart;
Chris@320 172
Chris@320 173 long prevCompletion = 0;
Chris@320 174
Chris@320 175 long latency = m_plugin->getLatency();
Chris@320 176
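// Process block by block, continuing for an extra latency's worth of
// frames past the end of the context so that output delayed by the
// plugin's latency is flushed through.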
Chris@320 177 while (blockFrame < contextStart + contextDuration + latency &&
Chris@320 178 !m_abandoned) {
Chris@320 179
// Guard against a zero denominator when the context is shorter than one block.
Chris@320 180 long completion = (contextDuration < blockSize) ? 99 :
Chris@320 181 (((blockFrame - contextStart) / blockSize) * 99) /
Chris@320 182 ((contextDuration) / blockSize);
Chris@320 183
Chris@320 184 long got = 0;
Chris@320 185
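// Fill the plugin's input buffers. With a single input channel (or a
// single selected channel), that channel is copied to every plugin
// input; with more plugin inputs than input channels, channels are
// reused modulo the channel count. Any shortfall at the end of the
// model is zero-padded.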
Chris@320 186 if (channelCount == 1) {
Chris@320 187 if (inbufs && inbufs[0]) {
Chris@320 188 got = input->getData
Chris@350 189 (m_input.getChannel(), blockFrame, blockSize, inbufs[0]);
Chris@320 190 while (got < blockSize) {
Chris@320 191 inbufs[0][got++] = 0.0;
Chris@320 192 }
Chris@320 193 }
Chris@320 194 for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 195 for (long i = 0; i < blockSize; ++i) {
Chris@320 196 inbufs[ch][i] = inbufs[0][i];
Chris@320 197 }
Chris@320 198 }
Chris@320 199 } else {
Chris@429 200 if (inbufs && inbufs[0]) {
Chris@429 201 got = input->getData(0, channelCount - 1,
Chris@429 202 blockFrame, blockSize,
Chris@429 203 inbufs);
Chris@429 204 while (got < blockSize) {
Chris@429 205 for (size_t ch = 0; ch < channelCount; ++ch) {
Chris@429 206 inbufs[ch][got] = 0.0;
Chris@429 207 }
Chris@429 208 ++got;
Chris@320 209 }
Chris@429 210 }
Chris@320 211 for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 212 for (long i = 0; i < blockSize; ++i) {
Chris@320 213 inbufs[ch][i] = inbufs[ch % channelCount][i];
Chris@320 214 }
Chris@320 215 }
Chris@320 216 }
Chris@320 217
Chris@320 218 /*
Chris@320 219 std::cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels "<< std::endl;
Chris@320 220
Chris@320 221 for (size_t ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@320 222 std::cerr << "Input channel " << ch << std::endl;
Chris@320 223 for (size_t i = 0; i < 100; ++i) {
Chris@320 224 std::cerr << inbufs[ch][i] << " ";
Chris@320 225 if (isnan(inbufs[ch][i])) {
Chris@320 226 std::cerr << "\n\nWARNING: NaN in audio input" << std::endl;
Chris@320 227 }
Chris@320 228 }
Chris@320 229 }
Chris@320 230 */
Chris@320 231
Chris@320 232 m_plugin->run(Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));
Chris@320 233
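// Collect this block's results: for a control output, read the current
// control value and add it as a point at the latency-compensated frame;
// for audio output, write the processed samples to the wave-file model
// once the plugin's latency has been accounted for.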
Chris@320 234 if (stvm) {
Chris@320 235
Chris@320 236 float value = m_plugin->getControlOutputValue(m_outputNo);
Chris@320 237
Chris@320 238 long pointFrame = blockFrame;
Chris@320 239 if (pointFrame > latency) pointFrame -= latency;
Chris@320 240 else pointFrame = 0;
Chris@320 241
Chris@320 242 stvm->addPoint(SparseTimeValueModel::Point
Chris@320 243 (pointFrame, value, ""));
Chris@320 244
Chris@320 245 } else if (wwfm) {
Chris@320 246
Chris@320 247 float **outbufs = m_plugin->getAudioOutputBuffers();
Chris@320 248
Chris@320 249 if (outbufs) {
Chris@320 250
Chris@320 251 if (blockFrame >= latency) {
Chris@320 252 long writeSize = std::min
Chris@320 253 (blockSize,
Chris@320 254 contextStart + contextDuration + latency - blockFrame);
Chris@320 255 wwfm->addSamples(outbufs, writeSize);
Chris@320 256 } else if (blockFrame + blockSize >= latency) {
Chris@320 257 long offset = latency - blockFrame;
Chris@320 258 long count = blockSize - offset;
Chris@320 259 float **tmp = new float *[channelCount];
Chris@320 260 for (size_t c = 0; c < channelCount; ++c) {
Chris@320 261 tmp[c] = outbufs[c] + offset;
Chris@320 262 }
Chris@320 263 wwfm->addSamples(tmp, count);
Chris@320 264 delete[] tmp;
Chris@320 265 }
Chris@320 266 }
Chris@320 267 }
Chris@320 268
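// Report progress on the first block and whenever the percentage
// completion advances.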
Chris@320 269 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@320 270 if (stvm) stvm->setCompletion(completion);
Chris@320 271 if (wwfm) wwfm->setCompletion(completion);
Chris@320 272 prevCompletion = completion;
Chris@320 273 }
Chris@320 274
Chris@320 275 blockFrame += blockSize;
Chris@320 276 }
Chris@320 277
Chris@320 278 if (m_abandoned) return;
Chris@320 279
Chris@320 280 if (stvm) stvm->setCompletion(100);
Chris@320 281 if (wwfm) wwfm->setCompletion(100);
Chris@320 282 }
Chris@320 283