svapp: comparison of audio/AudioGenerator.cpp @ 468:56acd9368532 (branch bqaudioio)

Initial work toward switching to bqaudioio library (so as to get I/O, not just O)

author:    Chris Cannam
date:      Tue, 04 Aug 2015 13:27:42 +0100
parents:   audioio/AudioGenerator.cpp@3485d324c172
children:  4480b031fe38
comparing: 466:45054b36ddbf with 468:56acd9368532
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "data/model/NoteModel.h"
#include "data/model/FlexiNoteModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/SparseOneDimensionalModel.h"
#include "data/model/NoteData.h"

#include "ClipMixer.h"
#include "ContinuousSynth.h"

#include <iostream>
#include <cmath>

#include <QDir>
#include <QFile>

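// All rendering here happens in fixed-size blocks; the play source is
// expected to request frame counts that are multiples of this (see the
// longer //!!! comment in mixClipModel below)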
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    initialiseSampleDir();

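    // Re-create a model's clip mixer whenever the user assigns it a
    // different playback sample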
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}

AudioGenerator::~AudioGenerator()
{
#ifdef DEBUG_AUDIO_GENERATOR
    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
#endif
}

void
AudioGenerator::initialiseSampleDir()
{
    if (m_sampleDir != "") return;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (const DirectoryCreationFailed &f) {
        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
             << " Failed to create temporary sample directory"
             << endl;
        m_sampleDir = "";
        return;
    }

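    // Copy the bundled clips out of the Qt resource bundle into a real
    // directory: they are loaded again later by filesystem path (see
    // makeClipMixerFor)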
    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));
        QString target = QDir(m_sampleDir).filePath(fileName);

        if (!file.copy(target)) {
            cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
                 << "Unable to copy " << fileName
                 << " into temporary directory \""
                 << m_sampleDir << "\"" << endl;
        } else {
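            // Files copied out of the resource bundle are read-only;
            // make them writable so the temporary directory can be
            // cleaned up later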
            QFile tf(target);
            tf.setPermissions(tf.permissions() |
                              QFile::WriteOwner |
                              QFile::WriteUser);
        }
    }
}

bool
AudioGenerator::addModel(Model *model)
{
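    // The return value indicates whether the model will be audible.
    // The source sample rate is taken from the first model added; a
    // dense (audio) model added later overrides it, and needs no mixer
    // of its own since it is read directly in mixDenseTimeValueModel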
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return false;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);

    bool willPlay = !parameters->isPlayMuted();

    if (usesClipMixer(model)) {
        ClipMixer *mixer = makeClipMixerFor(model);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[model] = mixer;
            return willPlay;
        }
    }

    if (usesContinuousSynth(model)) {
        ContinuousSynth *synth = makeSynthFor(model);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[model] = synth;
            return willPlay;
        }
    }

    return false;
}

void
AudioGenerator::playClipIdChanged(const Playable *playable, QString)
{
    const Model *model = dynamic_cast<const Model *>(playable);
    if (!model) {
        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
             << playable << " is not a supported model type"
             << endl;
        return;
    }

    if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;

    ClipMixer *mixer = makeClipMixerFor(model);
    if (mixer) {
        QMutexLocker locker(&m_mutex);
        m_clipMixerMap[model] = mixer;
    }
}

bool
AudioGenerator::usesClipMixer(const Model *model)
{
    bool clip =
        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
         qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return clip;
}

bool
AudioGenerator::wantsQuieterClips(const Model *model)
{
    // basically, anything that usually has sustain, or often has
    // multiple sounds playing at once -- notes, in both cases --
    // wants to use a quieter level than simple click tracks
    bool does =
        (qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return does;
}

bool
AudioGenerator::usesContinuousSynth(const Model *model)
{
    bool cont =
        (qobject_cast<const SparseTimeValueModel *>(model));
    return cont;
}

ClipMixer *
AudioGenerator::makeClipMixerFor(const Model *model)
{
    QString clipId;

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (parameters) {
        clipId = parameters->getPlayClipId();
    }

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
#endif

    if (clipId == "") {
        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
        return 0;
    }

    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
                                     m_sourceSampleRate,
                                     m_processingBlockSize);

    // Take the clip's nominal root pitch to be middle C (MIDI 60)
    double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0);

    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);

    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
    if (!mixer->loadClipData(clipPath, clipF0, level)) {
        delete mixer;
        return 0;
    }

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
#endif

    return mixer;
}

ContinuousSynth *
AudioGenerator::makeSynthFor(const Model *model)
{
    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
                                                 m_sourceSampleRate,
                                                 m_processingBlockSize,
                                                 m_waveType);

#ifdef DEBUG_AUDIO_GENERATOR
    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
#endif

    return synth;
}

void
AudioGenerator::removeModel(Model *model)
{
    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (!sodm) return; // nothing to do
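    // (only sparse one-dimensional models are removed individually
    // here; mixers for other clip-based model types persist until
    // clearModels() is called)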

    QMutexLocker locker(&m_mutex);

    if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;

    ClipMixer *mixer = m_clipMixerMap[sodm];
    m_clipMixerMap.erase(sodm);
    delete mixer;
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);

    while (!m_clipMixerMap.empty()) {
        ClipMixer *mixer = m_clipMixerMap.begin()->second;
        m_clipMixerMap.erase(m_clipMixerMap.begin());
        delete mixer;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);

#ifdef DEBUG_AUDIO_GENERATOR
    cerr << "AudioGenerator::reset()" << endl;
#endif

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) {
            i->second->reset();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(int targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

//    SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) i->second->setChannelCount(targetChannelCount);
    }
}

sv_frame_t
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}

void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}

void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}

sv_frame_t
AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
                         float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}

sv_frame_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

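    // The fades straddle the block boundaries: the fade-in covers the
    // fadeIn frames centred on startFrame, and the fade-out the fadeOut
    // frames centred on startFrame + frames, so we read fadeIn/2 extra
    // frames before the block and fadeOut/2 after it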
    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {
        got = dtvm->getMultiChannelData(0, modelChannels - 1,
                                        startFrame - fadeIn/2,
                                        frames + fadeOut/2 + fadeIn/2,
                                        m_channelBuffer);
    } else {
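        // We're too close to the start of the model to read the full
        // fade-in lead-in: shift the channel pointers forward so the
        // frames we can read land at the right offsets, then shift
        // them back afterwards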
        sv_frame_t missing = fadeIn/2 - startFrame;

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] += missing;
        }

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        got = dtvm->getMultiChannelData(0, modelChannels - 1,
                                        startFrame,
                                        frames + fadeOut/2,
                                        m_channelBuffer);

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] -= missing;
        }

        got += missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        int sourceChannel = (c % modelChannels);

//        SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        float channelGain = gain;
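        // Simple linear pan law: a positive pan attenuates the left
        // channel (c == 0) and a negative pan the right, leaving the
        // favoured side at full gain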
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

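        // Mix the first half of the fade-in into the fadeIn/2 frames
        // immediately before the requested block: the caller is
        // assumed to guarantee writable space there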
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}

sv_frame_t
AudioGenerator::mixClipModel(Model *model,
                             sv_frame_t startFrame, sv_frame_t frames,
                             float **buffer, float gain, float pan)
{
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
         << ", blocks " << blocks << ", have " << m_noteOffs.size()
         << " note-offs" << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    NoteOffSet &noteOffs = m_noteOffs[model];

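    // noteOffs carries the pending ends of notes started in earlier
    // blocks, ordered by end frame, so that a sustained note can be
    // terminated in whichever later block its end falls into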
    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            sv_frame_t noteFrame = ni->start;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                sv_frame_t eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0f;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            sv_frame_t eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}

sv_frame_t
AudioGenerator::mixContinuousSynthModel(Model *model,
                                        sv_frame_t startFrame,
                                        sv_frame_t frames,
                                        float **buffer,
                                        float gain,
                                        float pan)
{
    ContinuousSynth *synth = m_continuousSynthMap[model];
    if (!synth) return 0;

    // only type we support here at the moment
    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
    if (!stvm || stvm->getScaleUnits() != "Hz") return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: see comment in mixClipModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [synth]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        SparseTimeValueModel::PointList points =
            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);

        // by default, repeat last frequency
        float f0 = 0.f;

        // go straight to the last freq that is genuinely in this range
        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
             itr != points.begin(); ) {
            --itr;
            if (itr->frame >= reqStart &&
                itr->frame < reqStart + m_processingBlockSize) {
                f0 = itr->value;
                break;
            }
        }

        // if we found no such frequency and the next point is further
        // away than twice the model resolution, go silent (same
        // criterion TimeValueLayer uses for ending a discrete curve
        // segment)
        if (f0 == 0.f) {
            SparseTimeValueModel::PointList nextPoints =
                stvm->getNextPoints(reqStart + m_processingBlockSize);
            if (nextPoints.empty() ||
                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
                f0 = -1.f;
            }
        }
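        // (f0 == 0.f asks the synth to hold its previous frequency,
        // and f0 == -1.f asks it to fall silent, matching the
        // comments above)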

//        cerr << "f0 = " << f0 << endl;

        synth->mix(bufferIndexes,
                   gain,
                   pan,
                   f0);
    }

    delete[] bufferIndexes;

    return got;
}