/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "data/model/NoteModel.h"
#include "data/model/FlexiNoteModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/SparseOneDimensionalModel.h"
#include "data/model/NoteData.h"

#include "ClipMixer.h"
#include "ContinuousSynth.h"

#include <iostream>
#include <cmath>

#include <QDir>
#include <QFile>

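// Size in frames of the blocks in which event models are rendered
// through ClipMixer and ContinuousSynth instances; also reported to
// callers via getBlockSize().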
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    initialiseSampleDir();

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}

AudioGenerator::~AudioGenerator()
{
#ifdef DEBUG_AUDIO_GENERATOR
    cerr << "AudioGenerator::~AudioGenerator" << endl;
#endif

    for (int i = 0; i < m_channelBufCount; ++i) {
        delete[] m_channelBuffer[i];
    }
    delete[] m_channelBuffer;
}

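// Copy the built-in clip samples out of the Qt resource bundle into a
// temporary directory, so that they can be loaded from a plain file
// path later (see makeClipMixerFor).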
void
AudioGenerator::initialiseSampleDir()
{
    if (m_sampleDir != "") return;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (const DirectoryCreationFailed &f) {
        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
             << " Failed to create temporary sample directory"
             << endl;
        m_sampleDir = "";
        return;
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));
        QString target = QDir(m_sampleDir).filePath(fileName);

        if (!file.copy(target)) {
            cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
                 << "Unable to copy " << fileName
                 << " into temporary directory \""
                 << m_sampleDir << "\"" << endl;
        } else {
            QFile tf(target);
            tf.setPermissions(tf.permissions() |
                              QFile::WriteOwner |
                              QFile::WriteUser);
        }
    }
}

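// Take on a model for playback. The return value indicates whether
// the model will be audibly played; it is false for muted models and
// for models of types we have no way to play.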
bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return false;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return false;

    bool willPlay = !parameters->isPlayMuted();

    if (usesClipMixer(model)) {
        ClipMixer *mixer = makeClipMixerFor(model);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[model->getId()] = mixer;
            return willPlay;
        }
    }

    if (usesContinuousSynth(model)) {
        ContinuousSynth *synth = makeSynthFor(model);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[model->getId()] = synth;
            return willPlay;
        }
    }

    return false;
}

void
AudioGenerator::playClipIdChanged(const Playable *playable, QString)
{
    const Model *model = dynamic_cast<const Model *>(playable);
    if (!model) {
        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
             << playable << " is not a supported model type"
             << endl;
        return;
    }

    if (m_clipMixerMap.find(model->getId()) == m_clipMixerMap.end()) {
        return;
    }

    ClipMixer *mixer = makeClipMixerFor(model);
    if (mixer) {
        QMutexLocker locker(&m_mutex);
        m_clipMixerMap[model->getId()] = mixer;
    }
}

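// True for models whose events are rendered by triggering a sampled
// clip per event: time instants (clicks) and notes.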
bool
AudioGenerator::usesClipMixer(const Model *model)
{
    bool clip =
        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
         qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return clip;
}

bool
AudioGenerator::wantsQuieterClips(const Model *model)
{
    // basically, anything that usually has sustain (like notes) or
    // often has multiple sounds playing at once (again, like notes)
    // wants to use a quieter level than a simple click track does
    bool does =
        (qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return does;
}

bool
AudioGenerator::usesContinuousSynth(const Model *model)
{
    bool cont =
        (qobject_cast<const SparseTimeValueModel *>(model));
    return cont;
}

ClipMixer *
AudioGenerator::makeClipMixerFor(const Model *model)
{
    QString clipId;

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (parameters) {
        clipId = parameters->getPlayClipId();
    }

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
#endif

    if (clipId == "") {
        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
        return 0;
    }

    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
                                     m_sourceSampleRate,
                                     m_processingBlockSize);

    // clip files are assumed to be pitched at MIDI note 60 (middle C)
    double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0);

    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);

    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
    if (!mixer->loadClipData(clipPath, clipF0, level)) {
        delete mixer;
        return 0;
    }

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
#endif

    return mixer;
}

ContinuousSynth *
AudioGenerator::makeSynthFor(const Model *model)
{
    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
                                                 m_sourceSampleRate,
                                                 m_processingBlockSize,
                                                 m_waveType);

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
#endif

    return synth;
}

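// Note that only SparseOneDimensionalModel instances are removed
// individually here; clip mixers for note models remain registered
// until clearModels() is called.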
void
AudioGenerator::removeModel(Model *model)
{
    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (!sodm) return; // nothing to do

    QMutexLocker locker(&m_mutex);

    if (m_clipMixerMap.find(sodm->getId()) == m_clipMixerMap.end()) {
        return;
    }

    ClipMixer *mixer = m_clipMixerMap[sodm->getId()];
    m_clipMixerMap.erase(sodm->getId());
    delete mixer;
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);

    while (!m_clipMixerMap.empty()) {
        ClipMixer *mixer = m_clipMixerMap.begin()->second;
        m_clipMixerMap.erase(m_clipMixerMap.begin());
        delete mixer;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);

#ifdef DEBUG_AUDIO_GENERATOR
    cerr << "AudioGenerator::reset()" << endl;
#endif

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin();
         i != m_clipMixerMap.end(); ++i) {
        if (i->second) {
            i->second->reset();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(int targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

    // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) i->second->setChannelCount(targetChannelCount);
    }
}

sv_frame_t
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}

void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}

void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}

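// Mix up to frameCount frames of the given model into buffer (one
// array per target channel), starting at startFrame, and return the
// number of frames accounted for. A muted, solo-excluded, or
// unplayable model reports the full frameCount without writing
// anything.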
sv_frame_t
AudioGenerator::mixModel(Model *model,
                         sv_frame_t startFrame, sv_frame_t frameCount,
                         float **buffer,
                         sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}

sv_frame_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

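    // Reallocate the deinterleaved scratch buffers if this request
    // needs more space than they currently provide.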
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame - fadeIn/2,
                                              frames + fadeOut/2 + fadeIn/2);

        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
        }

        got = data[0].size();

    } else {
        sv_frame_t missing = fadeIn/2 - startFrame;

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame,
                                              frames + fadeOut/2);
        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
        }

        got = data[0].size() + missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

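        // Simple linear pan law: positive pan attenuates channel 0
        // (the left of a stereo pair), negative pan attenuates the
        // others.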
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

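        // The first half of the fade-in is written into the fadeIn/2
        // frames immediately before the nominal start of buffer[c];
        // the caller is assumed to have valid space there.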
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}

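// Render an event model (time instants or notes) by translating its
// events into note-on/note-off pairs for the model's ClipMixer, one
// processing block at a time. Note-offs still pending at the end of a
// call are kept in m_noteOffs, so a note can end in a later block, or
// a later call, than the one it started in.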
sv_frame_t
AudioGenerator::mixClipModel(Model *model,
                             sv_frame_t startFrame, sv_frame_t frames,
                             float **buffer, float gain, float pan)
{
    ClipMixer *clipMixer = m_clipMixerMap[model->getId()];
    if (!clipMixer) return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
         << ", blocks " << blocks << ", have " << m_noteOffs.size()
         << " note-offs" << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    NoteOffSet &noteOffs = m_noteOffs[model->getId()];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->onFrame > reqStart) {

            // We must have jumped back in time, as there is a
            // note-off pending for a note that hasn't begun yet. Emit
            // the note-off now and discard it

            off.frameOffset = 0;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding rewind-caused note-off at frame offset 0 frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            sv_frame_t noteFrame = ni->start;
            sv_frame_t noteDuration = ni->duration;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) {
                continue;
            }

            if (noteDuration == 0) {
                // If we have a note-off and a note-on with the same
                // time, then the note-off will be assumed (in the
                // logic below that deals with two-point note-on/off
                // events) to be switching off an earlier note before
                // this one begins -- that's necessary in order to
                // support adjoining notes of equal pitch. But it does
                // mean we have to explicitly ignore zero-duration
                // notes, otherwise they'll be played without end
#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
#endif
                continue;
            }

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->offFrame <= noteFrame) {

                sv_frame_t eventFrame = noteOffs.begin()->offFrame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0f;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + noteDuration, noteFrame));
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->offFrame <=
               reqStart + m_processingBlockSize) {

            sv_frame_t eventFrame = noteOffs.begin()->offFrame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}

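// Render a sparse time-value model whose values are frequencies in Hz
// through the model's ContinuousSynth, passing one target frequency
// per processing block: 0 means carry on at the previous frequency,
// -1 requests silence.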
sv_frame_t
AudioGenerator::mixContinuousSynthModel(Model *model,
                                        sv_frame_t startFrame,
                                        sv_frame_t frames,
                                        float **buffer,
                                        float gain,
                                        float pan)
{
    ContinuousSynth *synth = m_continuousSynthMap[model->getId()];
    if (!synth) return 0;

    // only type we support here at the moment
    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
    if (!stvm || stvm->getScaleUnits() != "Hz") return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: see comment in mixClipModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [synth]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        SparseTimeValueModel::PointList points =
            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);

        // by default, repeat last frequency
        float f0 = 0.f;

        // go straight to the last freq that is genuinely in this range
        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
             itr != points.begin(); ) {
            --itr;
            if (itr->frame >= reqStart &&
                itr->frame < reqStart + m_processingBlockSize) {
                f0 = itr->value;
                break;
            }
        }

        // if we found no such frequency and the next point is further
        // away than twice the model resolution, go silent (same
        // criterion TimeValueLayer uses for ending a discrete curve
        // segment)
        if (f0 == 0.f) {
            SparseTimeValueModel::PointList nextPoints =
                stvm->getNextPoints(reqStart + m_processingBlockSize);
            if (nextPoints.empty() ||
                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
                f0 = -1.f;
            }
        }

        // cerr << "f0 = " << f0 << endl;

        synth->mix(bufferIndexes,
                   gain,
                   pan,
                   f0);
    }

    delete[] bufferIndexes;

    return got;
}
|