comparison audio/AudioGenerator.cpp @ 470:4480b031fe38 3.0-integration

Merge from branch bqaudioio
author Chris Cannam
date Tue, 04 Aug 2015 16:39:40 +0100
parents audioio/AudioGenerator.cpp@1020db1698c0 audioio/AudioGenerator.cpp@56acd9368532
children 821aba42c1bb
comparison
equal deleted inserted replaced
469:85e7d2418d9a 470:4480b031fe38
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
2
3 /*
4 Sonic Visualiser
5 An audio file viewer and annotation editor.
6 Centre for Digital Music, Queen Mary, University of London.
7 This file copyright 2006 Chris Cannam.
8
9 This program is free software; you can redistribute it and/or
10 modify it under the terms of the GNU General Public License as
11 published by the Free Software Foundation; either version 2 of the
12 License, or (at your option) any later version. See the file
13 COPYING included with this distribution for more information.
14 */
15
16 #include "AudioGenerator.h"
17
18 #include "base/TempDirectory.h"
19 #include "base/PlayParameters.h"
20 #include "base/PlayParameterRepository.h"
21 #include "base/Pitch.h"
22 #include "base/Exceptions.h"
23
24 #include "data/model/NoteModel.h"
25 #include "data/model/FlexiNoteModel.h"
26 #include "data/model/DenseTimeValueModel.h"
27 #include "data/model/SparseTimeValueModel.h"
28 #include "data/model/SparseOneDimensionalModel.h"
29 #include "data/model/NoteData.h"
30
31 #include "ClipMixer.h"
32 #include "ContinuousSynth.h"
33
34 #include <iostream>
35 #include <cmath>
36
37 #include <QDir>
38 #include <QFile>
39
// Fixed number of frames processed per block by the clip-mixer and
// continuous-synth paths (see mixClipModel / mixContinuousSynthModel).
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

// Directory into which the bundled sample resources are copied at
// startup; remains empty if initialiseSampleDir() fails.
QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1
47
// Construct with no models attached.  The source sample rate is
// discovered from the first model added; the channel count defaults
// to mono until setTargetChannelCount() is called.  The per-channel
// scratch buffers are allocated lazily in mixDenseTimeValueModel.
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever a model's play clip id
    // changes in the repository (see playClipIdChanged slot below).
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}
64
65 AudioGenerator::~AudioGenerator()
66 {
67 #ifdef DEBUG_AUDIO_GENERATOR
68 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
69 #endif
70 }
71
72 void
73 AudioGenerator::initialiseSampleDir()
74 {
75 if (m_sampleDir != "") return;
76
77 try {
78 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
79 } catch (DirectoryCreationFailed f) {
80 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
81 << " Failed to create temporary sample directory"
82 << endl;
83 m_sampleDir = "";
84 return;
85 }
86
87 QDir sampleResourceDir(":/samples", "*.wav");
88
89 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
90
91 QString fileName(sampleResourceDir[i]);
92 QFile file(sampleResourceDir.filePath(fileName));
93 QString target = QDir(m_sampleDir).filePath(fileName);
94
95 if (!file.copy(target)) {
96 cerr << "WARNING: AudioGenerator::getSampleDir: "
97 << "Unable to copy " << fileName
98 << " into temporary directory \""
99 << m_sampleDir << "\"" << endl;
100 } else {
101 QFile tf(target);
102 tf.setPermissions(tf.permissions() |
103 QFile::WriteOwner |
104 QFile::WriteUser);
105 }
106 }
107 }
108
109 bool
110 AudioGenerator::addModel(Model *model)
111 {
112 if (m_sourceSampleRate == 0) {
113
114 m_sourceSampleRate = model->getSampleRate();
115
116 } else {
117
118 DenseTimeValueModel *dtvm =
119 dynamic_cast<DenseTimeValueModel *>(model);
120
121 if (dtvm) {
122 m_sourceSampleRate = model->getSampleRate();
123 return true;
124 }
125 }
126
127 const Playable *playable = model;
128 if (!playable || !playable->canPlay()) return 0;
129
130 PlayParameters *parameters =
131 PlayParameterRepository::getInstance()->getPlayParameters(playable);
132
133 bool willPlay = !parameters->isPlayMuted();
134
135 if (usesClipMixer(model)) {
136 ClipMixer *mixer = makeClipMixerFor(model);
137 if (mixer) {
138 QMutexLocker locker(&m_mutex);
139 m_clipMixerMap[model] = mixer;
140 return willPlay;
141 }
142 }
143
144 if (usesContinuousSynth(model)) {
145 ContinuousSynth *synth = makeSynthFor(model);
146 if (synth) {
147 QMutexLocker locker(&m_mutex);
148 m_continuousSynthMap[model] = synth;
149 return willPlay;
150 }
151 }
152
153 return false;
154 }
155
156 void
157 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
158 {
159 const Model *model = dynamic_cast<const Model *>(playable);
160 if (!model) {
161 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
162 << playable << " is not a supported model type"
163 << endl;
164 return;
165 }
166
167 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
168
169 ClipMixer *mixer = makeClipMixerFor(model);
170 if (mixer) {
171 QMutexLocker locker(&m_mutex);
172 m_clipMixerMap[model] = mixer;
173 }
174 }
175
176 bool
177 AudioGenerator::usesClipMixer(const Model *model)
178 {
179 bool clip =
180 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
181 qobject_cast<const NoteModel *>(model) ||
182 qobject_cast<const FlexiNoteModel *>(model));
183 return clip;
184 }
185
186 bool
187 AudioGenerator::wantsQuieterClips(const Model *model)
188 {
189 // basically, anything that usually has sustain (like notes) or
190 // often has multiple sounds at once (like notes) wants to use a
191 // quieter level than simple click tracks
192 bool does =
193 (qobject_cast<const NoteModel *>(model) ||
194 qobject_cast<const FlexiNoteModel *>(model));
195 return does;
196 }
197
198 bool
199 AudioGenerator::usesContinuousSynth(const Model *model)
200 {
201 bool cont =
202 (qobject_cast<const SparseTimeValueModel *>(model));
203 return cont;
204 }
205
206 ClipMixer *
207 AudioGenerator::makeClipMixerFor(const Model *model)
208 {
209 QString clipId;
210
211 const Playable *playable = model;
212 if (!playable || !playable->canPlay()) return 0;
213
214 PlayParameters *parameters =
215 PlayParameterRepository::getInstance()->getPlayParameters(playable);
216 if (parameters) {
217 clipId = parameters->getPlayClipId();
218 }
219
220 #ifdef DEBUG_AUDIO_GENERATOR
221 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
222 #endif
223
224 if (clipId == "") {
225 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
226 return 0;
227 }
228
229 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
230 m_sourceSampleRate,
231 m_processingBlockSize);
232
233 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
234
235 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
236
237 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
238 if (!mixer->loadClipData(clipPath, clipF0, level)) {
239 delete mixer;
240 return 0;
241 }
242
243 #ifdef DEBUG_AUDIO_GENERATOR
244 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
245 #endif
246
247 return mixer;
248 }
249
250 ContinuousSynth *
251 AudioGenerator::makeSynthFor(const Model *model)
252 {
253 const Playable *playable = model;
254 if (!playable || !playable->canPlay()) return 0;
255
256 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
257 m_sourceSampleRate,
258 m_processingBlockSize,
259 m_waveType);
260
261 #ifdef DEBUG_AUDIO_GENERATOR
262 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
263 #endif
264
265 return synth;
266 }
267
268 void
269 AudioGenerator::removeModel(Model *model)
270 {
271 SparseOneDimensionalModel *sodm =
272 dynamic_cast<SparseOneDimensionalModel *>(model);
273 if (!sodm) return; // nothing to do
274
275 QMutexLocker locker(&m_mutex);
276
277 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
278
279 ClipMixer *mixer = m_clipMixerMap[sodm];
280 m_clipMixerMap.erase(sodm);
281 delete mixer;
282 }
283
284 void
285 AudioGenerator::clearModels()
286 {
287 QMutexLocker locker(&m_mutex);
288
289 while (!m_clipMixerMap.empty()) {
290 ClipMixer *mixer = m_clipMixerMap.begin()->second;
291 m_clipMixerMap.erase(m_clipMixerMap.begin());
292 delete mixer;
293 }
294 }
295
296 void
297 AudioGenerator::reset()
298 {
299 QMutexLocker locker(&m_mutex);
300
301 #ifdef DEBUG_AUDIO_GENERATOR
302 cerr << "AudioGenerator::reset()" << endl;
303 #endif
304
305 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
306 if (i->second) {
307 i->second->reset();
308 }
309 }
310
311 m_noteOffs.clear();
312 }
313
314 void
315 AudioGenerator::setTargetChannelCount(int targetChannelCount)
316 {
317 if (m_targetChannelCount == targetChannelCount) return;
318
319 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
320
321 QMutexLocker locker(&m_mutex);
322 m_targetChannelCount = targetChannelCount;
323
324 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
325 if (i->second) i->second->setChannelCount(targetChannelCount);
326 }
327 }
328
// All mixing paths here work in whole blocks of this many frames;
// callers should request mixes in multiples of it.
sv_frame_t
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}
334
// Restrict audible playback to the given set of models (stored by
// copy).  mixModel() consults this set while m_soloing is true.
void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}
343
// Leave solo mode: all unmuted models become audible again.
void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}
352
// Mix up to frameCount frames of the given model into buffer
// (m_targetChannelCount channels), starting at startFrame, honouring
// the model's play parameters (mute, gain, pan) and the current solo
// set.  Dispatches to the dense, clip-mixer or synth path according
// to model type.  Returns the number of frames accounted for
// (frameCount when the model is skipped entirely).
sv_frame_t
AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
                         float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    // When soloing, models outside the solo set are silent but still
    // report frameCount so the caller's accounting is unaffected.
    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dense audio data is mixed directly; other playable model types
    // are rendered via the clip mixer or the continuous synth.
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
411
// Mix audio frames from a dense (waveform) model into the target
// buffers, applying gain, pan, and linear fade-in/fade-out ramps.
// Returns the number of frames actually obtained from the model
// (which may be less than requested near the end of the model).
sv_frame_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

    // Lazily (re)allocate the per-channel scratch buffers whenever
    // this request needs more space or channels than we have.
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {

        // Normal case: read fadeIn/2 frames early so the fade-in ramp
        // can begin before startFrame.
        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame - fadeIn/2,
                                              frames + fadeOut/2 + fadeIn/2);

        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
        }

        got = data[0].size();

    } else {
        // Too close to the start of the model for the full pre-roll:
        // shift the data right by the missing amount instead.
        sv_frame_t missing = fadeIn/2 - startFrame;

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame,
                                              frames + fadeOut/2);
        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
        }

        got = data[0].size() + missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        // Wrap model channels when the target has more channels than
        // the model (e.g. mono model on stereo output).
        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        // Simple linear pan: pan > 0 attenuates channel 0, pan < 0
        // attenuates the others.
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

        // Fade-in pre-roll: this writes into the fadeIn/2 frames
        // BEFORE buffer[c].  NOTE(review): assumes the caller
        // guarantees writable memory there -- confirm against the
        // callback play source.
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f; // past the data we obtained: silence
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
512
// Render note events from a clip-based model (click track or note
// model) through its ClipMixer, one processing block at a time.
// Note-offs are queued per-model in m_noteOffs so that notes crossing
// a block boundary are terminated in a later block or call.  Returns
// the number of frames written (a whole number of processing blocks;
// any remainder of `frames` is not rendered).
sv_frame_t
AudioGenerator::mixClipModel(Model *model,
                             sv_frame_t startFrame, sv_frame_t frames,
                             float **buffer, float gain, float pan)
{
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
         << ", blocks " << blocks << ", have " << m_noteOffs.size()
         << " note-offs" << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    // Pending note-offs for this model, carried across blocks/calls.
    NoteOffSet &noteOffs = m_noteOffs[model];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            sv_frame_t noteFrame = ni->start;

            // Skip notes that start outside this block.
            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            // Flush every queued note-off due at or before this
            // note's start, clamping its frame into this block.
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                sv_frame_t eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0f; // MIDI velocity -> 0..1
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            // Queue the matching note-off at the end of the note.
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

        // Flush remaining note-offs that fall within this block.
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            sv_frame_t eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        // Point the mixer at this block's slice of the output buffers.
        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
631
632 sv_frame_t
633 AudioGenerator::mixContinuousSynthModel(Model *model,
634 sv_frame_t startFrame,
635 sv_frame_t frames,
636 float **buffer,
637 float gain,
638 float pan)
639 {
640 ContinuousSynth *synth = m_continuousSynthMap[model];
641 if (!synth) return 0;
642
643 // only type we support here at the moment
644 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
645 if (stvm->getScaleUnits() != "Hz") return 0;
646
647 int blocks = int(frames / m_processingBlockSize);
648
649 //!!! todo: see comment in mixClipModel
650
651 sv_frame_t got = blocks * m_processingBlockSize;
652
653 #ifdef DEBUG_AUDIO_GENERATOR
654 cout << "mixModel [synth]: frames " << frames
655 << ", blocks " << blocks << endl;
656 #endif
657
658 float **bufferIndexes = new float *[m_targetChannelCount];
659
660 for (int i = 0; i < blocks; ++i) {
661
662 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
663
664 for (int c = 0; c < m_targetChannelCount; ++c) {
665 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
666 }
667
668 SparseTimeValueModel::PointList points =
669 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
670
671 // by default, repeat last frequency
672 float f0 = 0.f;
673
674 // go straight to the last freq that is genuinely in this range
675 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
676 itr != points.begin(); ) {
677 --itr;
678 if (itr->frame >= reqStart &&
679 itr->frame < reqStart + m_processingBlockSize) {
680 f0 = itr->value;
681 break;
682 }
683 }
684
685 // if we found no such frequency and the next point is further
686 // away than twice the model resolution, go silent (same
687 // criterion TimeValueLayer uses for ending a discrete curve
688 // segment)
689 if (f0 == 0.f) {
690 SparseTimeValueModel::PointList nextPoints =
691 stvm->getNextPoints(reqStart + m_processingBlockSize);
692 if (nextPoints.empty() ||
693 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
694 f0 = -1.f;
695 }
696 }
697
698 // cerr << "f0 = " << f0 << endl;
699
700 synth->mix(bufferIndexes,
701 gain,
702 pan,
703 f0);
704 }
705
706 delete[] bufferIndexes;
707
708 return got;
709 }
710