annotate transform/RealTimeEffectModelTransformer.cpp @ 1429:48e9f538e6e9

summary   Untabify
author    Chris Cannam
date      Thu, 01 Mar 2018 18:02:22 +0000
parents   e994747fb9dd
children  fde8c497373f

rev       line  source
Chris@320 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@320 2
Chris@320 3 /*
Chris@320 4 Sonic Visualiser
Chris@320 5 An audio file viewer and annotation editor.
Chris@320 6 Centre for Digital Music, Queen Mary, University of London.
Chris@320 7 This file copyright 2006 Chris Cannam and QMUL.
Chris@320 8
Chris@320 9 This program is free software; you can redistribute it and/or
Chris@320 10 modify it under the terms of the GNU General Public License as
Chris@320 11 published by the Free Software Foundation; either version 2 of the
Chris@320 12 License, or (at your option) any later version. See the file
Chris@320 13 COPYING included with this distribution for more information.
Chris@320 14 */
Chris@320 15
Chris@331 16 #include "RealTimeEffectModelTransformer.h"
Chris@320 17
Chris@320 18 #include "plugin/RealTimePluginFactory.h"
Chris@320 19 #include "plugin/RealTimePluginInstance.h"
Chris@320 20 #include "plugin/PluginXml.h"
Chris@320 21
Chris@320 22 #include "data/model/Model.h"
Chris@320 23 #include "data/model/SparseTimeValueModel.h"
Chris@320 24 #include "data/model/DenseTimeValueModel.h"
Chris@320 25 #include "data/model/WritableWaveFileModel.h"
Chris@320 26 #include "data/model/WaveFileModel.h"
Chris@320 27
Chris@350 28 #include "TransformFactory.h"
Chris@350 29
Chris@320 30 #include <iostream>
Chris@320 31
Chris@350 32 RealTimeEffectModelTransformer::RealTimeEffectModelTransformer(Input in,
Chris@850 33 const Transform &t) :
Chris@850 34 ModelTransformer(in, t),
Chris@350 35 m_plugin(0)
Chris@320 36 {
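// Work on a local copy of the transform: if no block size was specified,
// default to 1024 frames and store the amended copy back as the transform
// that will actually be applied.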
Chris@850 37 Transform transform(t);
Chris@850 38 if (!transform.getBlockSize()) {
Chris@850 39 transform.setBlockSize(1024);
Chris@850 40 m_transforms[0] = transform;
Chris@850 41 }
Chris@850 42
Chris@350 43 m_units = TransformFactory::getInstance()->getTransformUnits
Chris@350 44 (transform.getIdentifier());
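// An output of "A" selects the plugin's audio output (represented as -1
// below); any other value is taken as the index of a control output.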
Chris@350 45 m_outputNo =
Chris@350 46 (transform.getOutput() == "A") ? -1 : transform.getOutput().toInt();
Chris@350 47
Chris@350 48 QString pluginId = transform.getPluginIdentifier();
Chris@350 49
Chris@690 50 // SVDEBUG << "RealTimeEffectModelTransformer::RealTimeEffectModelTransformer: plugin " << pluginId << ", output " << output << endl;
Chris@320 51
Chris@320 52 RealTimePluginFactory *factory =
Chris@1429 53 RealTimePluginFactory::instanceFor(pluginId);
Chris@320 54
Chris@320 55 if (!factory) {
Chris@1429 56 cerr << "RealTimeEffectModelTransformer: No factory available for plugin id \""
Chris@1429 57 << pluginId << "\"" << endl;
Chris@1429 58 return;
Chris@320 59 }
Chris@320 60
Chris@350 61 DenseTimeValueModel *input = getConformingInput();
Chris@320 62 if (!input) return;
Chris@320 63
Chris@320 64 m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
Chris@350 65 input->getSampleRate(),
Chris@850 66 transform.getBlockSize(),
Chris@320 67 input->getChannelCount());
Chris@320 68
Chris@320 69 if (!m_plugin) {
Chris@1429 70 cerr << "RealTimeEffectModelTransformer: Failed to instantiate plugin \""
Chris@850 71 << pluginId << "\"" << endl;
Chris@1429 72 return;
Chris@320 73 }
Chris@320 74
Chris@850 75 TransformFactory::getInstance()->setPluginParameters(transform, m_plugin);
Chris@320 76
Chris@320 77 if (m_outputNo >= 0 &&
Chris@320 78 m_outputNo >= int(m_plugin->getControlOutputCount())) {
Chris@843 79 cerr << "RealTimeEffectModelTransformer: Plugin has fewer control outputs than the requested output index " << m_outputNo << endl;
Chris@320 80 return;
Chris@320 81 }
Chris@320 82
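// For the audio output we write the processed audio into a writable
// wave-file model; for a control output we record one value per
// processing block in a sparse time-value model.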
Chris@320 83 if (m_outputNo == -1) {
Chris@320 84
Chris@1039 85 int outputChannels = (int)m_plugin->getAudioOutputCount();
Chris@320 86 if (outputChannels > input->getChannelCount()) {
Chris@320 87 outputChannels = input->getChannelCount();
Chris@320 88 }
Chris@320 89
Chris@320 90 WritableWaveFileModel *model = new WritableWaveFileModel
Chris@320 91 (input->getSampleRate(), outputChannels);
Chris@320 92
Chris@849 93 m_outputs.push_back(model);
Chris@320 94
Chris@320 95 } else {
Chris@1429 96
Chris@320 97 SparseTimeValueModel *model = new SparseTimeValueModel
Chris@850 98 (input->getSampleRate(), transform.getBlockSize(), 0.0, 0.0, false);
Chris@320 99
Chris@350 100 if (m_units != "") model->setScaleUnits(m_units);
Chris@320 101
Chris@849 102 m_outputs.push_back(model);
Chris@320 103 }
Chris@320 104 }
Chris@320 105
Chris@331 106 RealTimeEffectModelTransformer::~RealTimeEffectModelTransformer()
Chris@320 107 {
Chris@320 108 delete m_plugin;
Chris@320 109 }
Chris@320 110
Chris@320 111 DenseTimeValueModel *
Chris@350 112 RealTimeEffectModelTransformer::getConformingInput()
Chris@320 113 {
Chris@320 114 DenseTimeValueModel *dtvm =
Chris@1429 115 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@320 116 if (!dtvm) {
Chris@1429 117 SVDEBUG << "RealTimeEffectModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << endl;
Chris@320 118 }
Chris@320 119 return dtvm;
Chris@320 120 }
Chris@320 121
Chris@320 122 void
Chris@331 123 RealTimeEffectModelTransformer::run()
Chris@320 124 {
Chris@350 125 DenseTimeValueModel *input = getConformingInput();
Chris@320 126 if (!input) return;
Chris@320 127
Chris@497 128 while (!input->isReady() && !m_abandoned) {
Chris@690 129 SVDEBUG << "RealTimeEffectModelTransformer::run: Waiting for input model to be ready..." << endl;
Chris@497 130 usleep(500000);
Chris@320 131 }
Chris@497 132 if (m_abandoned) return;
Chris@320 133
Chris@849 134 SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_outputs[0]);
Chris@849 135 WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_outputs[0]);
Chris@320 136 if (!stvm && !wwfm) return;
Chris@320 137
Chris@320 138 if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;
Chris@320 139
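// Determine the processing geometry: sample rate, channel count (a single
// selected input channel is processed as mono unless we are writing audio
// output), and the plugin's own buffer size.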
Chris@1040 140 sv_samplerate_t sampleRate = input->getSampleRate();
Chris@930 141 int channelCount = input->getChannelCount();
Chris@350 142 if (!wwfm && m_input.getChannel() != -1) channelCount = 1;
Chris@320 143
Chris@1039 144 sv_frame_t blockSize = m_plugin->getBufferSize();
Chris@320 145
Chris@320 146 float **inbufs = m_plugin->getAudioInputBuffers();
Chris@320 147
Chris@1039 148 sv_frame_t startFrame = m_input.getModel()->getStartFrame();
Chris@1039 149 sv_frame_t endFrame = m_input.getModel()->getEndFrame();
Chris@850 150
Chris@850 151 Transform transform = m_transforms[0];
Chris@320 152
Chris@850 153 RealTime contextStartRT = transform.getStartTime();
Chris@850 154 RealTime contextDurationRT = transform.getDuration();
Chris@350 155
Chris@1039 156 sv_frame_t contextStart =
Chris@350 157 RealTime::realTime2Frame(contextStartRT, sampleRate);
Chris@350 158
Chris@1039 159 sv_frame_t contextDuration =
Chris@350 160 RealTime::realTime2Frame(contextDurationRT, sampleRate);
Chris@320 161
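// Constrain the processing context to the extent of the input model:
// default to the full model where no start or duration was given, and
// clip the duration to the model's end.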
Chris@320 162 if (contextStart == 0 || contextStart < startFrame) {
Chris@320 163 contextStart = startFrame;
Chris@320 164 }
Chris@320 165
Chris@320 166 if (contextDuration == 0) {
Chris@320 167 contextDuration = endFrame - contextStart;
Chris@320 168 }
Chris@320 169 if (contextStart + contextDuration > endFrame) {
Chris@320 170 contextDuration = endFrame - contextStart;
Chris@320 171 }
Chris@320 172
Chris@414 173 if (wwfm) {
Chris@414 174 wwfm->setStartFrame(contextStart);
Chris@414 175 }
Chris@320 176
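// Process block by block, continuing past the end of the context by the
// plugin's latency so that any delayed output is flushed.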
Chris@1039 177 sv_frame_t blockFrame = contextStart;
Chris@320 178
Chris@1039 179 int prevCompletion = 0;
Chris@320 180
Chris@1039 181 sv_frame_t latency = m_plugin->getLatency();
Chris@320 182
Chris@320 183 while (blockFrame < contextStart + contextDuration + latency &&
Chris@320 184 !m_abandoned) {
Chris@320 185
Chris@1429 186 int completion = int
Chris@1429 187 ((((blockFrame - contextStart) / blockSize) * 99) /
Chris@1039 188 (1 + ((contextDuration) / blockSize)));
Chris@320 189
Chris@1429 190 sv_frame_t got = 0;
Chris@320 191
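// Fill the plugin's audio input buffers: zero-pad any shortfall at the
// end of the model, and if the plugin has more audio inputs than the
// source has channels, duplicate (or cycle) the available channels.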
Chris@1429 192 if (channelCount == 1) {
Chris@320 193 if (inbufs && inbufs[0]) {
Chris@1096 194 auto data = input->getData
Chris@1096 195 (m_input.getChannel(), blockFrame, blockSize);
Chris@1096 196 got = data.size();
Chris@1096 197 for (sv_frame_t i = 0; i < got; ++i) {
Chris@1096 198 inbufs[0][i] = data[i];
Chris@1096 199 }
Chris@320 200 while (got < blockSize) {
Chris@1096 201 inbufs[0][got++] = 0.f;
Chris@320 202 }
Chris@975 203 for (int ch = 1; ch < (int)m_plugin->getAudioInputCount(); ++ch) {
Chris@1039 204 for (sv_frame_t i = 0; i < blockSize; ++i) {
Chris@975 205 inbufs[ch][i] = inbufs[0][i];
Chris@975 206 }
Chris@320 207 }
Chris@320 208 }
Chris@1429 209 } else {
Chris@429 210 if (inbufs && inbufs[0]) {
Chris@1096 211 auto data = input->getMultiChannelData
Chris@1096 212 (0, channelCount - 1, blockFrame, blockSize);
Chris@1096 213 if (!data.empty()) got = data[0].size();
Chris@1096 214 for (int ch = 0; ch < channelCount; ++ch) {
Chris@1096 215 for (sv_frame_t i = 0; i < got; ++i) {
Chris@1096 216 inbufs[ch][i] = data[ch][i];
Chris@1096 217 }
Chris@1096 218 }
Chris@429 219 while (got < blockSize) {
Chris@930 220 for (int ch = 0; ch < channelCount; ++ch) {
Chris@429 221 inbufs[ch][got] = 0.0;
Chris@429 222 }
Chris@429 223 ++got;
Chris@320 224 }
Chris@975 225 for (int ch = channelCount; ch < (int)m_plugin->getAudioInputCount(); ++ch) {
Chris@1039 226 for (sv_frame_t i = 0; i < blockSize; ++i) {
Chris@975 227 inbufs[ch][i] = inbufs[ch % channelCount][i];
Chris@975 228 }
Chris@320 229 }
Chris@320 230 }
Chris@1429 231 }
Chris@320 232
Chris@320 233 /*
Chris@843 234 cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels "<< endl;
Chris@320 235
Chris@930 236 for (int ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
Chris@843 237 cerr << "Input channel " << ch << endl;
Chris@930 238 for (int i = 0; i < 100; ++i) {
Chris@843 239 cerr << inbufs[ch][i] << " ";
Chris@320 240 if (isnan(inbufs[ch][i])) {
Chris@843 241 cerr << "\n\nWARNING: NaN in audio input" << endl;
Chris@320 242 }
Chris@320 243 }
Chris@320 244 }
Chris@320 245 */
Chris@320 246
Chris@1040 247 m_plugin->run(RealTime::frame2RealTime(blockFrame, sampleRate));
Chris@320 248
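// For a control output, record this block's control value at a
// latency-compensated frame; for the audio output, write the plugin's
// output buffers, dropping the first 'latency' frames of output.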
Chris@320 249 if (stvm) {
Chris@320 250
Chris@320 251 float value = m_plugin->getControlOutputValue(m_outputNo);
Chris@320 252
Chris@1039 253 sv_frame_t pointFrame = blockFrame;
Chris@320 254 if (pointFrame > latency) pointFrame -= latency;
Chris@320 255 else pointFrame = 0;
Chris@320 256
Chris@320 257 stvm->addPoint(SparseTimeValueModel::Point
Chris@320 258 (pointFrame, value, ""));
Chris@320 259
Chris@320 260 } else if (wwfm) {
Chris@320 261
Chris@320 262 float **outbufs = m_plugin->getAudioOutputBuffers();
Chris@320 263
Chris@320 264 if (outbufs) {
Chris@320 265
Chris@320 266 if (blockFrame >= latency) {
Chris@1039 267 sv_frame_t writeSize = std::min
Chris@320 268 (blockSize,
Chris@320 269 contextStart + contextDuration + latency - blockFrame);
Chris@320 270 wwfm->addSamples(outbufs, writeSize);
Chris@320 271 } else if (blockFrame + blockSize >= latency) {
Chris@1039 272 sv_frame_t offset = latency - blockFrame;
Chris@1039 273 sv_frame_t count = blockSize - offset;
Chris@320 274 float **tmp = new float *[channelCount];
Chris@930 275 for (int c = 0; c < channelCount; ++c) {
Chris@320 276 tmp[c] = outbufs[c] + offset;
Chris@320 277 }
Chris@320 278 wwfm->addSamples(tmp, count);
Chris@320 279 delete[] tmp;
Chris@320 280 }
Chris@320 281 }
Chris@320 282 }
Chris@320 283
Chris@1429 284 if (blockFrame == contextStart || completion > prevCompletion) {
Chris@1133 285 // This setCompletion is probably misusing the completion
Chris@1133 286 // terminology, just as it was for WritableWaveFileModel
Chris@1429 287 if (stvm) stvm->setCompletion(completion);
Chris@1429 288 if (wwfm) wwfm->setWriteProportion(completion);
Chris@1429 289 prevCompletion = completion;
Chris@1429 290 }
Chris@320 291
Chris@1429 292 blockFrame += blockSize;
Chris@320 293 }
Chris@320 294
Chris@320 295 if (m_abandoned) return;
Chris@320 296
Chris@320 297 if (stvm) stvm->setCompletion(100);
Chris@1133 298 if (wwfm) wwfm->writeComplete();
Chris@320 299 }
Chris@320 300