comparison transform/RealTimeEffectModelTransformer.cpp @ 388:370aa9714ef5

* Move plugin/transform to plain transform. This way transform can depend on model and GUI classes, but plugin doesn't have to.
author Chris Cannam
date Wed, 12 Mar 2008 18:02:17 +0000
parents plugin/transform/RealTimeEffectModelTransformer.cpp@876a79afd376
children c8955c486340
comparison
equal deleted inserted replaced
387:7aa1de571880 388:370aa9714ef5
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
2
3 /*
4 Sonic Visualiser
5 An audio file viewer and annotation editor.
6 Centre for Digital Music, Queen Mary, University of London.
7 This file copyright 2006 Chris Cannam and QMUL.
8
9 This program is free software; you can redistribute it and/or
10 modify it under the terms of the GNU General Public License as
11 published by the Free Software Foundation; either version 2 of the
12 License, or (at your option) any later version. See the file
13 COPYING included with this distribution for more information.
14 */
15
16 #include "RealTimeEffectModelTransformer.h"
17
18 #include "plugin/RealTimePluginFactory.h"
19 #include "plugin/RealTimePluginInstance.h"
20 #include "plugin/PluginXml.h"
21
22 #include "data/model/Model.h"
23 #include "data/model/SparseTimeValueModel.h"
24 #include "data/model/DenseTimeValueModel.h"
25 #include "data/model/WritableWaveFileModel.h"
26 #include "data/model/WaveFileModel.h"
27
28 #include "TransformFactory.h"
29
30 #include <iostream>
31
/* Construct a transformer that runs a real-time (effect) plugin over the
 * input model.  The transform's output id selects what is produced:
 * "A" (stored as m_outputNo == -1) means the plugin's audio output and
 * yields a WritableWaveFileModel; any other id is parsed as a numeric
 * control-output index and yields a SparseTimeValueModel of that
 * output's values.  On any failure (no factory, unconformable input,
 * plugin instantiation failure, missing control output) we return
 * early, leaving m_output unset — callers must cope with a null output.
 */
RealTimeEffectModelTransformer::RealTimeEffectModelTransformer(Input in,
                                                               const Transform &transform) :
    ModelTransformer(in, transform),
    m_plugin(0)
{
    m_units = TransformFactory::getInstance()->getTransformUnits
        (transform.getIdentifier());

    // "A" selects the audio output; otherwise interpret the output id as
    // a control-output index.  NOTE(review): QString::toInt returns 0 for
    // a non-numeric id, silently selecting control output 0 — confirm
    // that output ids here are always either "A" or numeric.
    m_outputNo =
        (transform.getOutput() == "A") ? -1 : transform.getOutput().toInt();

    QString pluginId = transform.getPluginIdentifier();

    // Ensure a usable processing block size if the transform didn't set one.
    if (!m_transform.getBlockSize()) m_transform.setBlockSize(1024);

//    std::cerr << "RealTimeEffectModelTransformer::RealTimeEffectModelTransformer: plugin " << pluginId.toStdString() << ", output " << output << std::endl;

    RealTimePluginFactory *factory =
	RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
	std::cerr << "RealTimeEffectModelTransformer: No factory available for plugin id \""
		  << pluginId.toStdString() << "\"" << std::endl;
	return;
    }

    DenseTimeValueModel *input = getConformingInput();
    if (!input) return;

    // Instantiate with the input's sample rate and channel layout; the
    // two zero arguments are unused identifiers for this factory call.
    m_plugin = factory->instantiatePlugin(pluginId, 0, 0,
                                          input->getSampleRate(),
                                          m_transform.getBlockSize(),
                                          input->getChannelCount());

    if (!m_plugin) {
	std::cerr << "RealTimeEffectModelTransformer: Failed to instantiate plugin \""
		  << pluginId.toStdString() << "\"" << std::endl;
	return;
    }

    // Push the transform's stored parameter values into the plugin.
    TransformFactory::getInstance()->setPluginParameters(m_transform, m_plugin);

    // A numeric output index must refer to an existing control output.
    if (m_outputNo >= 0 &&
        m_outputNo >= int(m_plugin->getControlOutputCount())) {
        std::cerr << "RealTimeEffectModelTransformer: Plugin has fewer than desired " << m_outputNo << " control outputs" << std::endl;
        return;
    }

    if (m_outputNo == -1) {

        // Audio output: write processed audio, capped at the number of
        // channels the input actually has.
        size_t outputChannels = m_plugin->getAudioOutputCount();
        if (outputChannels > input->getChannelCount()) {
            outputChannels = input->getChannelCount();
        }

        WritableWaveFileModel *model = new WritableWaveFileModel
            (input->getSampleRate(), outputChannels);

        m_output = model;

    } else {

        // Control output: one value per processing block.
        SparseTimeValueModel *model = new SparseTimeValueModel
            (input->getSampleRate(), m_transform.getBlockSize(), 0.0, 0.0, false);

        if (m_units != "") model->setScaleUnits(m_units);

        m_output = model;
    }
}
101
RealTimeEffectModelTransformer::~RealTimeEffectModelTransformer()
{
    // m_plugin is null if construction bailed out early; deleting a
    // null pointer is harmless.  m_output is owned elsewhere.
    delete m_plugin;
}
106
107 DenseTimeValueModel *
108 RealTimeEffectModelTransformer::getConformingInput()
109 {
110 DenseTimeValueModel *dtvm =
111 dynamic_cast<DenseTimeValueModel *>(getInputModel());
112 if (!dtvm) {
113 std::cerr << "RealTimeEffectModelTransformer::getConformingInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
114 }
115 return dtvm;
116 }
117
118 void
119 RealTimeEffectModelTransformer::run()
120 {
121 DenseTimeValueModel *input = getConformingInput();
122 if (!input) return;
123
124 while (!input->isReady()) {
125 if (dynamic_cast<WaveFileModel *>(input)) break; // no need to wait
126 std::cerr << "RealTimeEffectModelTransformer::run: Waiting for input model to be ready..." << std::endl;
127 sleep(1);
128 }
129
130 SparseTimeValueModel *stvm = dynamic_cast<SparseTimeValueModel *>(m_output);
131 WritableWaveFileModel *wwfm = dynamic_cast<WritableWaveFileModel *>(m_output);
132 if (!stvm && !wwfm) return;
133
134 if (stvm && (m_outputNo >= int(m_plugin->getControlOutputCount()))) return;
135
136 size_t sampleRate = input->getSampleRate();
137 size_t channelCount = input->getChannelCount();
138 if (!wwfm && m_input.getChannel() != -1) channelCount = 1;
139
140 long blockSize = m_plugin->getBufferSize();
141
142 float **inbufs = m_plugin->getAudioInputBuffers();
143
144 long startFrame = m_input.getModel()->getStartFrame();
145 long endFrame = m_input.getModel()->getEndFrame();
146
147 RealTime contextStartRT = m_transform.getStartTime();
148 RealTime contextDurationRT = m_transform.getDuration();
149
150 long contextStart =
151 RealTime::realTime2Frame(contextStartRT, sampleRate);
152
153 long contextDuration =
154 RealTime::realTime2Frame(contextDurationRT, sampleRate);
155
156 if (contextStart == 0 || contextStart < startFrame) {
157 contextStart = startFrame;
158 }
159
160 if (contextDuration == 0) {
161 contextDuration = endFrame - contextStart;
162 }
163 if (contextStart + contextDuration > endFrame) {
164 contextDuration = endFrame - contextStart;
165 }
166
167 wwfm->setStartFrame(contextStart);
168
169 long blockFrame = contextStart;
170
171 long prevCompletion = 0;
172
173 long latency = m_plugin->getLatency();
174
175 while (blockFrame < contextStart + contextDuration + latency &&
176 !m_abandoned) {
177
178 long completion =
179 (((blockFrame - contextStart) / blockSize) * 99) /
180 ((contextDuration) / blockSize);
181
182 long got = 0;
183
184 if (channelCount == 1) {
185 if (inbufs && inbufs[0]) {
186 got = input->getData
187 (m_input.getChannel(), blockFrame, blockSize, inbufs[0]);
188 while (got < blockSize) {
189 inbufs[0][got++] = 0.0;
190 }
191 }
192 for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
193 for (long i = 0; i < blockSize; ++i) {
194 inbufs[ch][i] = inbufs[0][i];
195 }
196 }
197 } else {
198 got = input->getData(0, channelCount - 1,
199 blockFrame, blockSize,
200 inbufs);
201 while (got < blockSize) {
202 for (size_t ch = 0; ch < channelCount; ++ch) {
203 inbufs[ch][got] = 0.0;
204 }
205 ++got;
206 }
207 for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
208 for (long i = 0; i < blockSize; ++i) {
209 inbufs[ch][i] = inbufs[ch % channelCount][i];
210 }
211 }
212 }
213
214 /*
215 std::cerr << "Input for plugin: " << m_plugin->getAudioInputCount() << " channels "<< std::endl;
216
217 for (size_t ch = 0; ch < m_plugin->getAudioInputCount(); ++ch) {
218 std::cerr << "Input channel " << ch << std::endl;
219 for (size_t i = 0; i < 100; ++i) {
220 std::cerr << inbufs[ch][i] << " ";
221 if (isnan(inbufs[ch][i])) {
222 std::cerr << "\n\nWARNING: NaN in audio input" << std::endl;
223 }
224 }
225 }
226 */
227
228 m_plugin->run(Vamp::RealTime::frame2RealTime(blockFrame, sampleRate));
229
230 if (stvm) {
231
232 float value = m_plugin->getControlOutputValue(m_outputNo);
233
234 long pointFrame = blockFrame;
235 if (pointFrame > latency) pointFrame -= latency;
236 else pointFrame = 0;
237
238 stvm->addPoint(SparseTimeValueModel::Point
239 (pointFrame, value, ""));
240
241 } else if (wwfm) {
242
243 float **outbufs = m_plugin->getAudioOutputBuffers();
244
245 if (outbufs) {
246
247 if (blockFrame >= latency) {
248 long writeSize = std::min
249 (blockSize,
250 contextStart + contextDuration + latency - blockFrame);
251 wwfm->addSamples(outbufs, writeSize);
252 } else if (blockFrame + blockSize >= latency) {
253 long offset = latency - blockFrame;
254 long count = blockSize - offset;
255 float **tmp = new float *[channelCount];
256 for (size_t c = 0; c < channelCount; ++c) {
257 tmp[c] = outbufs[c] + offset;
258 }
259 wwfm->addSamples(tmp, count);
260 delete[] tmp;
261 }
262 }
263 }
264
265 if (blockFrame == contextStart || completion > prevCompletion) {
266 if (stvm) stvm->setCompletion(completion);
267 if (wwfm) wwfm->setCompletion(completion);
268 prevCompletion = completion;
269 }
270
271 blockFrame += blockSize;
272 }
273
274 if (m_abandoned) return;
275
276 if (stvm) stvm->setCompletion(100);
277 if (wwfm) wwfm->setCompletion(100);
278 }
279