Chris@43
|
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
|
Chris@43
|
2
|
Chris@43
|
3 /*
|
Chris@43
|
4 Sonic Visualiser
|
Chris@43
|
5 An audio file viewer and annotation editor.
|
Chris@43
|
6 Centre for Digital Music, Queen Mary, University of London.
|
Chris@43
|
7 This file copyright 2006 Chris Cannam.
|
Chris@43
|
8
|
Chris@43
|
9 This program is free software; you can redistribute it and/or
|
Chris@43
|
10 modify it under the terms of the GNU General Public License as
|
Chris@43
|
11 published by the Free Software Foundation; either version 2 of the
|
Chris@43
|
12 License, or (at your option) any later version. See the file
|
Chris@43
|
13 COPYING included with this distribution for more information.
|
Chris@43
|
14 */
|
Chris@43
|
15
|
Chris@43
|
16 #include "AudioGenerator.h"
|
Chris@43
|
17
|
Chris@43
|
18 #include "base/TempDirectory.h"
|
Chris@43
|
19 #include "base/PlayParameters.h"
|
Chris@43
|
20 #include "base/PlayParameterRepository.h"
|
Chris@43
|
21 #include "base/Pitch.h"
|
Chris@43
|
22 #include "base/Exceptions.h"
|
Chris@43
|
23
|
Chris@43
|
24 #include "data/model/NoteModel.h"
|
Chris@278
|
25 #include "data/model/FlexiNoteModel.h"
|
Chris@43
|
26 #include "data/model/DenseTimeValueModel.h"
|
Chris@43
|
27 #include "data/model/SparseOneDimensionalModel.h"
|
Chris@299
|
28 #include "data/model/NoteData.h"
|
Chris@43
|
29
|
Chris@307
|
30 #include "ClipMixer.h"
|
Chris@307
|
31
|
Chris@43
|
32 #include <iostream>
|
Chris@167
|
33 #include <cmath>
|
Chris@43
|
34
|
Chris@43
|
35 #include <QDir>
|
Chris@43
|
36 #include <QFile>
|
Chris@43
|
37
|
Chris@43
|
// Fixed processing block size (in frames) used when synthesising
// note/clip models; also reported via getBlockSize().
const size_t
AudioGenerator::m_processingBlockSize = 2048;

// Directory into which the bundled sample clips (:/samples/*.wav) are
// copied; empty until initialiseSampleDir() has successfully run.
QString
AudioGenerator::m_sampleDir = "";
|
Chris@43
|
43
|
Chris@43
|
44 //#define DEBUG_AUDIO_GENERATOR 1
|
Chris@43
|
45
|
Chris@43
|
// Construct an AudioGenerator with no models yet: the source sample
// rate is learned from the first model added, output defaults to one
// channel, and soloing starts disabled.
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_soloing(false)
{
    // Make sure the bundled sample clips exist on disc before any
    // ClipMixer tries to load them.
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever a model's playback
    // clip selection changes in the play parameter repository.
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}
|
Chris@43
|
58
|
Chris@43
|
// Destructor: optional debug tracing only.
// NOTE(review): the ClipMixer objects held in m_clipMixerMap are not
// deleted here — presumably clearModels() is expected to have run
// first; confirm against the owning play source.
AudioGenerator::~AudioGenerator()
{
#ifdef DEBUG_AUDIO_GENERATOR
    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
#endif
}
|
Chris@43
|
65
|
Chris@108
|
66 void
|
Chris@108
|
67 AudioGenerator::initialiseSampleDir()
|
Chris@43
|
68 {
|
Chris@108
|
69 if (m_sampleDir != "") return;
|
Chris@108
|
70
|
Chris@108
|
71 try {
|
Chris@108
|
72 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
|
Chris@108
|
73 } catch (DirectoryCreationFailed f) {
|
Chris@293
|
74 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
|
Chris@108
|
75 << " Failed to create temporary sample directory"
|
Chris@293
|
76 << endl;
|
Chris@108
|
77 m_sampleDir = "";
|
Chris@108
|
78 return;
|
Chris@108
|
79 }
|
Chris@108
|
80
|
Chris@108
|
81 QDir sampleResourceDir(":/samples", "*.wav");
|
Chris@108
|
82
|
Chris@108
|
83 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
|
Chris@108
|
84
|
Chris@108
|
85 QString fileName(sampleResourceDir[i]);
|
Chris@108
|
86 QFile file(sampleResourceDir.filePath(fileName));
|
Chris@151
|
87 QString target = QDir(m_sampleDir).filePath(fileName);
|
Chris@108
|
88
|
Chris@151
|
89 if (!file.copy(target)) {
|
Chris@293
|
90 cerr << "WARNING: AudioGenerator::getSampleDir: "
|
Chris@294
|
91 << "Unable to copy " << fileName
|
Chris@108
|
92 << " into temporary directory \""
|
Chris@293
|
93 << m_sampleDir << "\"" << endl;
|
Chris@151
|
94 } else {
|
Chris@151
|
95 QFile tf(target);
|
Chris@151
|
96 tf.setPermissions(tf.permissions() |
|
Chris@151
|
97 QFile::WriteOwner |
|
Chris@151
|
98 QFile::WriteUser);
|
Chris@108
|
99 }
|
Chris@43
|
100 }
|
Chris@43
|
101 }
|
Chris@43
|
102
|
Chris@43
|
// Register a model for playback.  Returns true if we will be able to
// play it, false otherwise.
bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        // First model added establishes the source sample rate.
        m_sourceSampleRate = model->getSampleRate();

    } else {

        // A dense (audio waveform) model added later takes over as
        // the reference sample rate and needs no clip mixer.
        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    // Sparse/note models are played by synthesising a clip: build a
    // mixer for the model's configured clip, if it has one.
    // NOTE(review): a dense model added while m_sourceSampleRate was
    // still 0 falls through to clip-mixer creation and may return
    // false even though dense models are playable — confirm intended.
    ClipMixer *mixer = makeClipMixerFor(model);
    if (mixer) {
        QMutexLocker locker(&m_mutex);
        m_clipMixerMap[model] = mixer;
        return true;
    }

    return false;
}
|
Chris@43
|
130
|
Chris@43
|
131 void
|
Chris@309
|
132 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
|
Chris@43
|
133 {
|
Chris@108
|
134 const Model *model = dynamic_cast<const Model *>(playable);
|
Chris@108
|
135 if (!model) {
|
Chris@309
|
136 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
|
Chris@108
|
137 << playable << " is not a supported model type"
|
Chris@293
|
138 << endl;
|
Chris@108
|
139 return;
|
Chris@108
|
140 }
|
Chris@108
|
141
|
Chris@307
|
142 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
|
Chris@307
|
143
|
Chris@307
|
144 ClipMixer *mixer = makeClipMixerFor(model);
|
Chris@307
|
145 if (mixer) {
|
Chris@43
|
146 QMutexLocker locker(&m_mutex);
|
Chris@307
|
147 m_clipMixerMap[model] = mixer;
|
Chris@43
|
148 }
|
Chris@43
|
149 }
|
Chris@308
|
150
|
Chris@305
|
151 /*!!!
|
Chris@43
|
152 void
|
Chris@108
|
153 AudioGenerator::playPluginConfigurationChanged(const Playable *playable,
|
Chris@43
|
154 QString configurationXml)
|
Chris@43
|
155 {
|
Chris@233
|
156 // SVDEBUG << "AudioGenerator::playPluginConfigurationChanged" << endl;
|
Chris@43
|
157
|
Chris@108
|
158 const Model *model = dynamic_cast<const Model *>(playable);
|
Chris@108
|
159 if (!model) {
|
Chris@309
|
160 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
|
Chris@108
|
161 << playable << " is not a supported model type"
|
Chris@293
|
162 << endl;
|
Chris@108
|
163 return;
|
Chris@108
|
164 }
|
Chris@108
|
165
|
Chris@43
|
166 if (m_synthMap.find(model) == m_synthMap.end()) {
|
Chris@233
|
167 SVDEBUG << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << endl;
|
Chris@43
|
168 return;
|
Chris@43
|
169 }
|
Chris@43
|
170
|
Chris@43
|
171 RealTimePluginInstance *plugin = m_synthMap[model];
|
Chris@43
|
172 if (plugin) {
|
Chris@43
|
173 PluginXml(plugin).setParametersFromXml(configurationXml);
|
Chris@43
|
174 }
|
Chris@43
|
175 }
|
Chris@43
|
176
|
Chris@43
|
177 void
|
Chris@43
|
178 AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
|
Chris@43
|
179 {
|
Chris@108
|
180 if (m_sampleDir != "") {
|
Chris@108
|
181 plugin->configure("sampledir", m_sampleDir.toStdString());
|
Chris@108
|
182 }
|
Chris@43
|
183 }
|
Chris@307
|
184 */
|
Chris@307
|
// Build a ClipMixer for the given model's configured playback clip.
// Returns 0 (with logging) if the model is not playable, has no clip
// id set, or the clip's .wav file cannot be loaded.  The caller owns
// the returned mixer.
ClipMixer *
AudioGenerator::makeClipMixerFor(const Model *model)
{
    QString clipId;

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (parameters) {
        clipId = parameters->getPlayClipId();
    }

    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;

    if (clipId == "") {
        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
        return 0;
    }

    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
                                     m_sourceSampleRate,
                                     m_processingBlockSize);

    // Reference frequency for the clip: MIDI note 60 (middle C) at
    // concert pitch A440.  loadClipData requires this to pitch-shift
    // the clip for other notes.
    float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required

    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);

    if (!mixer->loadClipData(clipPath, clipF0)) {
        delete mixer;
        return 0;
    }

    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;

    return mixer;
}
|
Chris@43
|
223
|
Chris@43
|
224 void
|
Chris@43
|
225 AudioGenerator::removeModel(Model *model)
|
Chris@43
|
226 {
|
Chris@43
|
227 SparseOneDimensionalModel *sodm =
|
Chris@43
|
228 dynamic_cast<SparseOneDimensionalModel *>(model);
|
Chris@43
|
229 if (!sodm) return; // nothing to do
|
Chris@43
|
230
|
Chris@43
|
231 QMutexLocker locker(&m_mutex);
|
Chris@43
|
232
|
Chris@308
|
233 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
|
Chris@43
|
234
|
Chris@308
|
235 ClipMixer *mixer = m_clipMixerMap[sodm];
|
Chris@308
|
236 m_clipMixerMap.erase(sodm);
|
Chris@308
|
237 delete mixer;
|
Chris@43
|
238 }
|
Chris@43
|
239
|
Chris@43
|
240 void
|
Chris@43
|
241 AudioGenerator::clearModels()
|
Chris@43
|
242 {
|
Chris@43
|
243 QMutexLocker locker(&m_mutex);
|
Chris@308
|
244
|
Chris@308
|
245 while (!m_clipMixerMap.empty()) {
|
Chris@308
|
246 ClipMixer *mixer = m_clipMixerMap.begin()->second;
|
Chris@308
|
247 m_clipMixerMap.erase(m_clipMixerMap.begin());
|
Chris@308
|
248 delete mixer;
|
Chris@43
|
249 }
|
Chris@43
|
250 }
|
Chris@43
|
251
|
Chris@43
|
252 void
|
Chris@43
|
253 AudioGenerator::reset()
|
Chris@43
|
254 {
|
Chris@43
|
255 QMutexLocker locker(&m_mutex);
|
Chris@308
|
256
|
Chris@308
|
257 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
|
Chris@43
|
258 if (i->second) {
|
Chris@308
|
259 i->second->reset();
|
Chris@43
|
260 }
|
Chris@43
|
261 }
|
Chris@43
|
262
|
Chris@43
|
263 m_noteOffs.clear();
|
Chris@43
|
264 }
|
Chris@43
|
265
|
Chris@43
|
266 void
|
Chris@43
|
267 AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
|
Chris@43
|
268 {
|
Chris@43
|
269 if (m_targetChannelCount == targetChannelCount) return;
|
Chris@43
|
270
|
Chris@233
|
271 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
|
Chris@43
|
272
|
Chris@43
|
273 QMutexLocker locker(&m_mutex);
|
Chris@43
|
274 m_targetChannelCount = targetChannelCount;
|
Chris@43
|
275
|
Chris@308
|
276 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
|
Chris@308
|
277 if (i->second) i->second->setChannelCount(targetChannelCount);
|
Chris@43
|
278 }
|
Chris@43
|
279 }
|
Chris@43
|
280
|
Chris@43
|
// The fixed block size (in frames) callers should use when invoking
// mixModel, so that synthetic note models render in whole blocks.
size_t
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}
|
Chris@43
|
286
|
Chris@43
|
287 void
|
Chris@43
|
288 AudioGenerator::setSoloModelSet(std::set<Model *> s)
|
Chris@43
|
289 {
|
Chris@43
|
290 QMutexLocker locker(&m_mutex);
|
Chris@43
|
291
|
Chris@43
|
292 m_soloModelSet = s;
|
Chris@43
|
293 m_soloing = true;
|
Chris@43
|
294 }
|
Chris@43
|
295
|
Chris@43
|
296 void
|
Chris@43
|
297 AudioGenerator::clearSoloModelSet()
|
Chris@43
|
298 {
|
Chris@43
|
299 QMutexLocker locker(&m_mutex);
|
Chris@43
|
300
|
Chris@43
|
301 m_soloModelSet.clear();
|
Chris@43
|
302 m_soloing = false;
|
Chris@43
|
303 }
|
Chris@43
|
304
|
Chris@43
|
// Mix up to frameCount frames of the given model, starting at
// startFrame, into the caller-supplied per-channel buffers.  Returns
// the number of frames processed; returns frameCount unchanged when
// there is nothing to mix (no sample rate known yet, model unplayable,
// muted, or excluded by soloing).
size_t
AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
                         float **buffer, size_t fadeIn, size_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    // Muted models contribute nothing.
    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    // In solo mode, only models in the solo set are audible.
    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dense (waveform) models are mixed directly from their data...
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    // ...while sparse/note models are synthesised from a clip.
    bool synthetic =
        (qobject_cast<SparseOneDimensionalModel *>(model) ||
         qobject_cast<NoteModel *>(model) ||
         qobject_cast<FlexiNoteModel *>(model));

    if (synthetic) {
        return mixSyntheticNoteModel(model, startFrame, frameCount,
                                     buffer, gain, pan, fadeIn, fadeOut);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
|
Chris@43
|
363
|
Chris@43
|
// Mix a dense (audio waveform) model into the output buffers, applying
// gain, pan, and linear fade-in/fade-out ramps.  The fades extend half
// their length either side of the nominal [startFrame, startFrame +
// frames) span, so slightly more data than `frames` is fetched from
// the model.  Returns the number of frames actually obtained.
size_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       size_t startFrame, size_t frames,
                                       float **buffer, float gain, float pan,
                                       size_t fadeIn, size_t fadeOut)
{
    // Scratch buffers reused across calls to avoid reallocating every
    // block.  NOTE(review): being function-static, this makes the
    // function non-reentrant; it appears to rely on callers being
    // serialized by the mutex held in mixModel — confirm.
    static float **channelBuffer = 0;
    static size_t channelBufSiz = 0;
    static size_t channelBufCount = 0;

    size_t totalFrames = frames + fadeIn/2 + fadeOut/2;

    size_t modelChannels = dtvm->getChannelCount();

    // Grow the scratch buffers if this request needs more space or
    // more channels than any previous one.
    if (channelBufSiz < totalFrames || channelBufCount < modelChannels) {

        for (size_t c = 0; c < channelBufCount; ++c) {
            delete[] channelBuffer[c];
        }

        delete[] channelBuffer;
        channelBuffer = new float *[modelChannels];

        for (size_t c = 0; c < modelChannels; ++c) {
            channelBuffer[c] = new float[totalFrames];
        }

        channelBufCount = modelChannels;
        channelBufSiz = totalFrames;
    }

    size_t got = 0;

    if (startFrame >= fadeIn/2) {
        // Normal case: fetch the whole span including the fade
        // extensions before and after.
        got = dtvm->getData(0, modelChannels - 1,
                            startFrame - fadeIn/2,
                            frames + fadeOut/2 + fadeIn/2,
                            channelBuffer);
    } else {
        // Near the very start of the model the fade-in extension
        // would underrun frame 0: fetch from 0 instead, offsetting
        // the channel pointers so the missing frames stay untouched.
        size_t missing = fadeIn/2 - startFrame;

        for (size_t c = 0; c < modelChannels; ++c) {
            channelBuffer[c] += missing;
        }

        got = dtvm->getData(0, modelChannels - 1,
                            startFrame,
                            frames + fadeOut/2,
                            channelBuffer);

        for (size_t c = 0; c < modelChannels; ++c) {
            channelBuffer[c] -= missing;
        }

        got += missing;
    }

    for (size_t c = 0; c < m_targetChannelCount; ++c) {

        // Wrap model channels round-robin when the output has more
        // channels than the model.
        size_t sourceChannel = (c % modelChannels);

        // Simple linear pan law: attenuate the channel opposite the
        // pan direction (channel 0 for pan > 0, others for pan < 0).
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        // The first half of the fade-in lies before buffer[c][0]; mix
        // it into the frames immediately preceding the output span.
        for (size_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn;
        }

        // Main span plus the tail half of the fade-out, with linear
        // ramps applied at both ends.
        for (size_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            float val = channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f; // zero-pad past the end of model data
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
|
Chris@43
|
458
|
Chris@43
|
// Synthesise a sparse/note model through its ClipMixer.  The span is
// rendered in whole m_processingBlockSize blocks; any remainder of
// `frames` that does not fill a block is not rendered (see the
// long-standing //!!! comment below).  Note-ons come from the model's
// NoteExportable interface; the matching note-offs are queued per
// model in m_noteOffs and issued once their frame is reached.  The
// fade parameters are ignored here.  Returns the number of frames
// actually rendered (blocks * block size).
size_t
AudioGenerator::mixSyntheticNoteModel(Model *model,
                                      size_t startFrame, size_t frames,
                                      float **buffer, float gain, float pan,
                                      size_t /* fadeIn */,
                                      size_t /* fadeOut */)
{
    // NOTE(review): operator[] will insert a null entry for a model
    // that was never added — presumably harmless, but confirm.
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    size_t blocks = frames / m_processingBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    size_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [synthetic note]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    // Note-offs still pending from earlier blocks for this model.
    NoteOffSet &noteOffs = m_noteOffs[model];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_processingBlockSize;

        // Notes that begin within this block.
        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotes(reqStart,
                                         reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            size_t noteFrame = ni->start;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            // Issue any queued note-offs that fall at or before this
            // note-on, clamping their frame to the start of the block.
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                size_t eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [synthetic]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0; // MIDI velocity -> 0..1
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [synthetic]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << endl;
#endif

            starts.push_back(on);
            // Queue the matching note-off for when this note's
            // duration elapses.
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

        // Issue any remaining note-offs that fall within this block.
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            size_t eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [synthetic]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        // Point each channel at this block's slice of the output.
        for (size_t c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
|