Chris@43
|
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
|
Chris@43
|
2
|
Chris@43
|
3 /*
|
Chris@43
|
4 Sonic Visualiser
|
Chris@43
|
5 An audio file viewer and annotation editor.
|
Chris@43
|
6 Centre for Digital Music, Queen Mary, University of London.
|
Chris@43
|
7 This file copyright 2006 Chris Cannam.
|
Chris@43
|
8
|
Chris@43
|
9 This program is free software; you can redistribute it and/or
|
Chris@43
|
10 modify it under the terms of the GNU General Public License as
|
Chris@43
|
11 published by the Free Software Foundation; either version 2 of the
|
Chris@43
|
12 License, or (at your option) any later version. See the file
|
Chris@43
|
13 COPYING included with this distribution for more information.
|
Chris@43
|
14 */
|
Chris@43
|
15
|
Chris@43
|
16 #include "AudioGenerator.h"
|
Chris@43
|
17
|
Chris@43
|
18 #include "base/TempDirectory.h"
|
Chris@43
|
19 #include "base/PlayParameters.h"
|
Chris@43
|
20 #include "base/PlayParameterRepository.h"
|
Chris@43
|
21 #include "base/Pitch.h"
|
Chris@43
|
22 #include "base/Exceptions.h"
|
Chris@43
|
23
|
Chris@43
|
24 #include "data/model/NoteModel.h"
|
Chris@278
|
25 #include "data/model/FlexiNoteModel.h"
|
Chris@43
|
26 #include "data/model/DenseTimeValueModel.h"
|
Chris@313
|
27 #include "data/model/SparseTimeValueModel.h"
|
Chris@43
|
28 #include "data/model/SparseOneDimensionalModel.h"
|
Chris@299
|
29 #include "data/model/NoteData.h"
|
Chris@43
|
30
|
Chris@307
|
31 #include "ClipMixer.h"
|
Chris@313
|
32 #include "ContinuousSynth.h"
|
Chris@307
|
33
|
Chris@43
|
34 #include <iostream>
|
Chris@167
|
35 #include <cmath>
|
Chris@43
|
36
|
Chris@43
|
37 #include <QDir>
|
Chris@43
|
38 #include <QFile>
|
Chris@43
|
39
|
Chris@366
|
// Fixed number of frames rendered per mixing block (see getBlockSize()).
const int
AudioGenerator::m_processingBlockSize = 1024;

// Temporary on-disk directory holding the clip .wav resources; set up
// lazily by initialiseSampleDir(), "" until then (or on failure).
QString
AudioGenerator::m_sampleDir = "";
|
Chris@43
|
45
|
Chris@43
|
46 //#define DEBUG_AUDIO_GENERATOR 1
|
Chris@43
|
47
|
Chris@43
|
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    // Make sure the clip .wav resources have been copied out to disk
    // before any ClipMixer tries to load them by path.
    initialiseSampleDir();

    // Rebuild a model's clip mixer whenever the user assigns it a
    // different playback clip.
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}
|
Chris@43
|
64
|
Chris@43
|
AudioGenerator::~AudioGenerator()
{
    // NOTE(review): the ClipMixer / ContinuousSynth objects held in the
    // maps are not freed here; clearModels() appears to be the cleanup
    // path -- confirm the owner calls it before destruction.
#ifdef DEBUG_AUDIO_GENERATOR
    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
#endif
}
|
Chris@43
|
71
|
Chris@108
|
72 void
|
Chris@108
|
73 AudioGenerator::initialiseSampleDir()
|
Chris@43
|
74 {
|
Chris@108
|
75 if (m_sampleDir != "") return;
|
Chris@108
|
76
|
Chris@108
|
77 try {
|
Chris@108
|
78 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
|
Chris@108
|
79 } catch (DirectoryCreationFailed f) {
|
Chris@293
|
80 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
|
Chris@108
|
81 << " Failed to create temporary sample directory"
|
Chris@293
|
82 << endl;
|
Chris@108
|
83 m_sampleDir = "";
|
Chris@108
|
84 return;
|
Chris@108
|
85 }
|
Chris@108
|
86
|
Chris@108
|
87 QDir sampleResourceDir(":/samples", "*.wav");
|
Chris@108
|
88
|
Chris@108
|
89 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
|
Chris@108
|
90
|
Chris@108
|
91 QString fileName(sampleResourceDir[i]);
|
Chris@108
|
92 QFile file(sampleResourceDir.filePath(fileName));
|
Chris@151
|
93 QString target = QDir(m_sampleDir).filePath(fileName);
|
Chris@108
|
94
|
Chris@151
|
95 if (!file.copy(target)) {
|
Chris@293
|
96 cerr << "WARNING: AudioGenerator::getSampleDir: "
|
Chris@294
|
97 << "Unable to copy " << fileName
|
Chris@108
|
98 << " into temporary directory \""
|
Chris@293
|
99 << m_sampleDir << "\"" << endl;
|
Chris@151
|
100 } else {
|
Chris@151
|
101 QFile tf(target);
|
Chris@151
|
102 tf.setPermissions(tf.permissions() |
|
Chris@151
|
103 QFile::WriteOwner |
|
Chris@151
|
104 QFile::WriteUser);
|
Chris@108
|
105 }
|
Chris@43
|
106 }
|
Chris@43
|
107 }
|
Chris@43
|
108
|
Chris@43
|
bool
AudioGenerator::addModel(Model *model)
{
    // Register a model for playback. Returns true if we have some way
    // to play it: a dense audio model, a clip mixer, or a continuous
    // synth. Returns false otherwise.
    if (m_sourceSampleRate == 0) {

        // The first model registered establishes the source sample rate.
        m_sourceSampleRate = model->getSampleRate();

    } else {

        // A dense (audio waveform) model added later overrides the
        // source rate; sparse models never change it.
        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    // Event-type models (instants, notes) get a ClipMixer.
    if (usesClipMixer(model)) {
        ClipMixer *mixer = makeClipMixerFor(model);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[model] = mixer;
            return true;
        }
    }

    // Time/value curve models get a ContinuousSynth.
    if (usesContinuousSynth(model)) {
        ContinuousSynth *synth = makeSynthFor(model);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[model] = synth;
            return true;
        }
    }

    return false;
}
|
Chris@43
|
147
|
Chris@43
|
148 void
|
Chris@309
|
149 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
|
Chris@43
|
150 {
|
Chris@108
|
151 const Model *model = dynamic_cast<const Model *>(playable);
|
Chris@108
|
152 if (!model) {
|
Chris@309
|
153 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
|
Chris@108
|
154 << playable << " is not a supported model type"
|
Chris@293
|
155 << endl;
|
Chris@108
|
156 return;
|
Chris@108
|
157 }
|
Chris@108
|
158
|
Chris@307
|
159 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
|
Chris@307
|
160
|
Chris@307
|
161 ClipMixer *mixer = makeClipMixerFor(model);
|
Chris@307
|
162 if (mixer) {
|
Chris@43
|
163 QMutexLocker locker(&m_mutex);
|
Chris@307
|
164 m_clipMixerMap[model] = mixer;
|
Chris@43
|
165 }
|
Chris@43
|
166 }
|
Chris@308
|
167
|
Chris@313
|
168 bool
|
Chris@313
|
169 AudioGenerator::usesClipMixer(const Model *model)
|
Chris@43
|
170 {
|
Chris@313
|
171 bool clip =
|
Chris@313
|
172 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
|
Chris@313
|
173 qobject_cast<const NoteModel *>(model) ||
|
Chris@313
|
174 qobject_cast<const FlexiNoteModel *>(model));
|
Chris@313
|
175 return clip;
|
Chris@43
|
176 }
|
Chris@43
|
177
|
Chris@313
|
178 bool
|
Chris@349
|
179 AudioGenerator::wantsQuieterClips(const Model *model)
|
Chris@349
|
180 {
|
Chris@349
|
181 // basically, anything that usually has sustain (like notes) or
|
Chris@349
|
182 // often has multiple sounds at once (like notes) wants to use a
|
Chris@349
|
183 // quieter level than simple click tracks
|
Chris@349
|
184 bool does =
|
Chris@349
|
185 (qobject_cast<const NoteModel *>(model) ||
|
Chris@349
|
186 qobject_cast<const FlexiNoteModel *>(model));
|
Chris@349
|
187 return does;
|
Chris@349
|
188 }
|
Chris@349
|
189
|
Chris@349
|
190 bool
|
Chris@313
|
191 AudioGenerator::usesContinuousSynth(const Model *model)
|
Chris@43
|
192 {
|
Chris@313
|
193 bool cont =
|
Chris@313
|
194 (qobject_cast<const SparseTimeValueModel *>(model));
|
Chris@313
|
195 return cont;
|
Chris@313
|
196 }
|
Chris@313
|
197
|
Chris@307
|
198 ClipMixer *
|
Chris@307
|
199 AudioGenerator::makeClipMixerFor(const Model *model)
|
Chris@43
|
200 {
|
Chris@309
|
201 QString clipId;
|
Chris@43
|
202
|
Chris@108
|
203 const Playable *playable = model;
|
Chris@108
|
204 if (!playable || !playable->canPlay()) return 0;
|
Chris@108
|
205
|
Chris@43
|
206 PlayParameters *parameters =
|
Chris@108
|
207 PlayParameterRepository::getInstance()->getPlayParameters(playable);
|
Chris@43
|
208 if (parameters) {
|
Chris@309
|
209 clipId = parameters->getPlayClipId();
|
Chris@43
|
210 }
|
Chris@43
|
211
|
Chris@309
|
212 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
|
Chris@276
|
213
|
Chris@309
|
214 if (clipId == "") {
|
Chris@308
|
215 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
|
Chris@276
|
216 return 0;
|
Chris@276
|
217 }
|
Chris@43
|
218
|
Chris@308
|
219 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
|
Chris@308
|
220 m_sourceSampleRate,
|
Chris@308
|
221 m_processingBlockSize);
|
Chris@307
|
222
|
Chris@308
|
223 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required
|
Chris@307
|
224
|
Chris@309
|
225 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
|
Chris@307
|
226
|
Chris@349
|
227 float level = wantsQuieterClips(model) ? 0.5 : 1.0;
|
Chris@349
|
228 if (!mixer->loadClipData(clipPath, clipF0, level)) {
|
Chris@308
|
229 delete mixer;
|
Chris@43
|
230 return 0;
|
Chris@43
|
231 }
|
Chris@43
|
232
|
Chris@309
|
233 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
|
Chris@43
|
234
|
Chris@308
|
235 return mixer;
|
Chris@308
|
236 }
|
Chris@43
|
237
|
Chris@313
|
238 ContinuousSynth *
|
Chris@313
|
239 AudioGenerator::makeSynthFor(const Model *model)
|
Chris@313
|
240 {
|
Chris@313
|
241 const Playable *playable = model;
|
Chris@313
|
242 if (!playable || !playable->canPlay()) return 0;
|
Chris@313
|
243
|
Chris@313
|
244 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
|
Chris@313
|
245 m_sourceSampleRate,
|
rmb456@323
|
246 m_processingBlockSize,
|
rmb456@323
|
247 m_waveType);
|
Chris@313
|
248
|
Chris@313
|
249 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
|
Chris@313
|
250
|
Chris@313
|
251 return synth;
|
Chris@313
|
252 }
|
Chris@313
|
253
|
Chris@43
|
254 void
|
Chris@43
|
255 AudioGenerator::removeModel(Model *model)
|
Chris@43
|
256 {
|
Chris@43
|
257 SparseOneDimensionalModel *sodm =
|
Chris@43
|
258 dynamic_cast<SparseOneDimensionalModel *>(model);
|
Chris@43
|
259 if (!sodm) return; // nothing to do
|
Chris@43
|
260
|
Chris@43
|
261 QMutexLocker locker(&m_mutex);
|
Chris@43
|
262
|
Chris@308
|
263 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
|
Chris@43
|
264
|
Chris@308
|
265 ClipMixer *mixer = m_clipMixerMap[sodm];
|
Chris@308
|
266 m_clipMixerMap.erase(sodm);
|
Chris@308
|
267 delete mixer;
|
Chris@43
|
268 }
|
Chris@43
|
269
|
Chris@43
|
270 void
|
Chris@43
|
271 AudioGenerator::clearModels()
|
Chris@43
|
272 {
|
Chris@43
|
273 QMutexLocker locker(&m_mutex);
|
Chris@308
|
274
|
Chris@308
|
275 while (!m_clipMixerMap.empty()) {
|
Chris@308
|
276 ClipMixer *mixer = m_clipMixerMap.begin()->second;
|
Chris@308
|
277 m_clipMixerMap.erase(m_clipMixerMap.begin());
|
Chris@308
|
278 delete mixer;
|
Chris@43
|
279 }
|
Chris@43
|
280 }
|
Chris@43
|
281
|
Chris@43
|
282 void
|
Chris@43
|
283 AudioGenerator::reset()
|
Chris@43
|
284 {
|
Chris@43
|
285 QMutexLocker locker(&m_mutex);
|
Chris@308
|
286
|
Chris@308
|
287 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
|
Chris@43
|
288 if (i->second) {
|
Chris@308
|
289 i->second->reset();
|
Chris@43
|
290 }
|
Chris@43
|
291 }
|
Chris@43
|
292
|
Chris@43
|
293 m_noteOffs.clear();
|
Chris@43
|
294 }
|
Chris@43
|
295
|
Chris@43
|
void
AudioGenerator::setTargetChannelCount(int targetChannelCount)
{
    // Change the number of output channels we mix into; propagated to
    // the existing clip mixers.
    if (m_targetChannelCount == targetChannelCount) return;

//    SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    // NOTE(review): only the clip mixers are told about the new channel
    // count; entries in m_continuousSynthMap are not updated here --
    // confirm whether ContinuousSynth needs an equivalent call.
    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) i->second->setChannelCount(targetChannelCount);
    }
}
|
Chris@43
|
310
|
Chris@366
|
int
AudioGenerator::getBlockSize() const
{
    // All event/synth mixing happens in fixed blocks of this many frames;
    // callers should request multiples of this from mixModel.
    return m_processingBlockSize;
}
|
Chris@43
|
316
|
Chris@43
|
317 void
|
Chris@43
|
318 AudioGenerator::setSoloModelSet(std::set<Model *> s)
|
Chris@43
|
319 {
|
Chris@43
|
320 QMutexLocker locker(&m_mutex);
|
Chris@43
|
321
|
Chris@43
|
322 m_soloModelSet = s;
|
Chris@43
|
323 m_soloing = true;
|
Chris@43
|
324 }
|
Chris@43
|
325
|
Chris@43
|
326 void
|
Chris@43
|
327 AudioGenerator::clearSoloModelSet()
|
Chris@43
|
328 {
|
Chris@43
|
329 QMutexLocker locker(&m_mutex);
|
Chris@43
|
330
|
Chris@43
|
331 m_soloModelSet.clear();
|
Chris@43
|
332 m_soloing = false;
|
Chris@43
|
333 }
|
Chris@43
|
334
|
Chris@366
|
int
AudioGenerator::mixModel(Model *model, int startFrame, int frameCount,
                         float **buffer, int fadeIn, int fadeOut)
{
    // Mix up to frameCount frames of this model's playback audio into
    // buffer (one float array per target channel) starting at
    // startFrame. Returns the number of frames handled; models that are
    // skipped (muted, not solo'd, unplayable) report frameCount so the
    // caller's accounting is unaffected.
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    // Skip muted models silently.
    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    // While soloing, skip any model not in the solo set.
    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dispatch on model type: dense audio data, clip-based events, or
    // continuous synthesis.
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
|
Chris@43
|
393
|
Chris@366
|
int
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       int startFrame, int frames,
                                       float **buffer, float gain, float pan,
                                       int fadeIn, int fadeOut)
{
    // We read a window extending fadeIn/2 frames before and fadeOut/2
    // frames after the requested range, so this is the worst-case
    // scratch-buffer size we could need:
    int maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

    // Grow the persistent per-channel scratch buffers if this request
    // is larger (in frames or channels) than anything seen so far.
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    int got = 0;

    if (startFrame >= fadeIn/2) {
        // Normal case: read starting half a fade-in before startFrame.
        got = dtvm->getData(0, modelChannels - 1,
                            startFrame - fadeIn/2,
                            frames + fadeOut/2 + fadeIn/2,
                            m_channelBuffer);
    } else {
        // Near the start of the model there isn't room for the full
        // fade-in lead-in; shift the channel pointers forward by the
        // shortfall so the data still lands at the expected offsets,
        // then restore them afterwards.
        int missing = fadeIn/2 - startFrame;

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] += missing;
        }

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        got = dtvm->getData(0, modelChannels - 1,
                            startFrame,
                            frames + fadeOut/2,
                            m_channelBuffer);

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] -= missing;
        }

        got += missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        // Extra target channels wrap around the model's channels.
        int sourceChannel = (c % modelChannels);

//        SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        // Simple stereo pan law: pan in [-1, 1] attenuates the channel
        // on the opposite side.
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        // Fade-in region written "behind" the nominal output pointer
        // (the caller's buffer is expected to have fadeIn/2 frames of
        // headroom before it -- NOTE(review): confirm at call sites).
        for (int i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn;
        }

        // Main region plus fade-out tail; ramp gain inside the fade
        // windows and zero-pad past the end of the data we actually got.
        for (int i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
|
Chris@43
|
491
|
Chris@366
|
int
AudioGenerator::mixClipModel(Model *model,
                             int startFrame, int frames,
                             float **buffer, float gain, float pan)
{
    // Render a sparse event model (instants/notes) by feeding note-on /
    // note-off events to its ClipMixer, one processing block at a time.
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    // Only whole blocks are rendered; any remainder of frames is
    // silently dropped (see the long-standing comment below).
    int blocks = frames / m_processingBlockSize;

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    int got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    // Note-offs still pending for this model from earlier blocks/calls.
    NoteOffSet &noteOffs = m_noteOffs[model];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        int reqStart = startFrame + i * m_processingBlockSize;

        // Collect the notes that begin within this block.
        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            int noteFrame = ni->start;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            // Emit any pending note-offs due at or before this note's
            // start, clamping their offsets into the current block.
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                int eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            // Schedule the note-on (MIDI velocity 0-127 mapped to level
            // 0-1) and queue its note-off at the end of its duration.
            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

        // Emit any remaining note-offs that fall within this block.
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            int eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        // Point each channel at this block's slice of the output buffer
        // and mix the collected events.
        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
|
Chris@313
|
609
|
Chris@366
|
610 int
|
Chris@313
|
611 AudioGenerator::mixContinuousSynthModel(Model *model,
|
Chris@366
|
612 int startFrame,
|
Chris@366
|
613 int frames,
|
Chris@313
|
614 float **buffer,
|
Chris@313
|
615 float gain,
|
Chris@313
|
616 float pan)
|
Chris@313
|
617 {
|
Chris@313
|
618 ContinuousSynth *synth = m_continuousSynthMap[model];
|
Chris@313
|
619 if (!synth) return 0;
|
Chris@313
|
620
|
Chris@313
|
621 // only type we support here at the moment
|
Chris@313
|
622 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
|
Chris@313
|
623 if (stvm->getScaleUnits() != "Hz") return 0;
|
Chris@313
|
624
|
Chris@366
|
625 int blocks = frames / m_processingBlockSize;
|
Chris@313
|
626
|
Chris@313
|
627 //!!! todo: see comment in mixClipModel
|
Chris@313
|
628
|
Chris@366
|
629 int got = blocks * m_processingBlockSize;
|
Chris@313
|
630
|
Chris@313
|
631 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@313
|
632 cout << "mixModel [synth]: frames " << frames
|
Chris@313
|
633 << ", blocks " << blocks << endl;
|
Chris@313
|
634 #endif
|
Chris@313
|
635
|
Chris@313
|
636 float **bufferIndexes = new float *[m_targetChannelCount];
|
Chris@313
|
637
|
Chris@366
|
638 for (int i = 0; i < blocks; ++i) {
|
Chris@313
|
639
|
Chris@366
|
640 int reqStart = startFrame + i * m_processingBlockSize;
|
Chris@313
|
641
|
Chris@366
|
642 for (int c = 0; c < m_targetChannelCount; ++c) {
|
Chris@313
|
643 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
|
Chris@313
|
644 }
|
Chris@313
|
645
|
Chris@313
|
646 SparseTimeValueModel::PointList points =
|
Chris@313
|
647 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
|
Chris@313
|
648
|
Chris@313
|
649 // by default, repeat last frequency
|
Chris@313
|
650 float f0 = 0.f;
|
Chris@313
|
651
|
Chris@313
|
652 // go straight to the last freq that is genuinely in this range
|
Chris@313
|
653 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
|
Chris@313
|
654 itr != points.begin(); ) {
|
Chris@313
|
655 --itr;
|
Chris@313
|
656 if (itr->frame >= reqStart &&
|
Chris@313
|
657 itr->frame < reqStart + m_processingBlockSize) {
|
Chris@313
|
658 f0 = itr->value;
|
Chris@313
|
659 break;
|
Chris@313
|
660 }
|
Chris@313
|
661 }
|
Chris@313
|
662
|
Chris@314
|
663 // if we found no such frequency and the next point is further
|
Chris@314
|
664 // away than twice the model resolution, go silent (same
|
Chris@314
|
665 // criterion TimeValueLayer uses for ending a discrete curve
|
Chris@314
|
666 // segment)
|
Chris@314
|
667 if (f0 == 0.f) {
|
Chris@314
|
668 SparseTimeValueModel::PointList nextPoints =
|
Chris@314
|
669 stvm->getNextPoints(reqStart + m_processingBlockSize);
|
Chris@314
|
670 if (nextPoints.empty() ||
|
Chris@314
|
671 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
|
Chris@314
|
672 f0 = -1.f;
|
Chris@314
|
673 }
|
Chris@314
|
674 }
|
Chris@314
|
675
|
Chris@315
|
676 // cerr << "f0 = " << f0 << endl;
|
Chris@313
|
677
|
Chris@313
|
678 synth->mix(bufferIndexes,
|
Chris@313
|
679 gain,
|
Chris@313
|
680 pan,
|
Chris@313
|
681 f0);
|
Chris@313
|
682 }
|
Chris@313
|
683
|
Chris@313
|
684 delete[] bufferIndexes;
|
Chris@313
|
685
|
Chris@313
|
686 return got;
|
Chris@313
|
687 }
|
Chris@313
|
688
|