sonic-visualiser
comparison audioio/AudioGenerator.cpp @ 0:cd5d7ff8ef38
* Reorganising code base. This revision will not compile.
author      Chris Cannam
date        Mon, 31 Jul 2006 12:03:45 +0000
parents     (none)
children    40116f709d3b
comparing -1:000000000000 with 0:cd5d7ff8ef38
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "model/NoteModel.h"
#include "model/DenseTimeValueModel.h"
#include "model/SparseOneDimensionalModel.h"

#include "plugin/RealTimePluginFactory.h"
#include "plugin/RealTimePluginInstance.h"
#include "plugin/PluginIdentifier.h"
#include "plugin/PluginXml.h"
#include "plugin/api/alsa/seq_event.h"

#include <iostream>
#include <math.h>

#include <QDir>
#include <QFile>

const size_t
AudioGenerator::m_pluginBlockSize = 2048;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1)
{
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginIdChanged(const Model *, QString)),
            this,
            SLOT(playPluginIdChanged(const Model *, QString)));

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginConfigurationChanged(const Model *, QString)),
            this,
            SLOT(playPluginConfigurationChanged(const Model *, QString)));
}

AudioGenerator::~AudioGenerator()
{
}

bool
AudioGenerator::canPlay(const Model *model)
{
    if (dynamic_cast<const DenseTimeValueModel *>(model) ||
        dynamic_cast<const SparseOneDimensionalModel *>(model) ||
        dynamic_cast<const NoteModel *>(model)) {
        return true;
    } else {
        return false;
    }
}

bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        m_synthMap[model] = plugin;
        return true;
    }

    return false;
}

void
AudioGenerator::playPluginIdChanged(const Model *model, QString)
{
    if (m_synthMap.find(model) == m_synthMap.end()) return;

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        delete m_synthMap[model];
        m_synthMap[model] = plugin;
    }
}

void
AudioGenerator::playPluginConfigurationChanged(const Model *model,
                                               QString configurationXml)
{
//    std::cerr << "AudioGenerator::playPluginConfigurationChanged" << std::endl;

    if (m_synthMap.find(model) == m_synthMap.end()) {
        std::cerr << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << std::endl;
        return;
    }

    RealTimePluginInstance *plugin = m_synthMap[model];
    if (plugin) {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }
}

QString
AudioGenerator::getDefaultPlayPluginId(const Model *model)
{
    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    return "";
}

QString
AudioGenerator::getDefaultPlayPluginConfiguration(const Model *model)
{
    QString program = "";

    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        program = "tap";
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        program = "piano";
    }

    if (program == "") return "";

    return
        QString("<plugin configuration=\"%1\" program=\"%2\"/>")
        .arg(XmlExportable::encodeEntities
             (QString("sampledir=%1")
              .arg(PluginXml::encodeConfigurationChars(getSampleDir()))))
        .arg(XmlExportable::encodeEntities(program));
}
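
// For illustration only: with a NoteModel and the built-in sample_player,
// the string built above would take roughly this form (the sampledir path
// shown is hypothetical; the real one comes from TempDirectory at runtime):
//
//     <plugin configuration="sampledir=/tmp/sv-temp/samples" program="piano"/>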

QString
AudioGenerator::getSampleDir()
{
    if (m_sampleDir != "") return m_sampleDir;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (DirectoryCreationFailed f) {
        std::cerr << "WARNING: AudioGenerator::getSampleDir: Failed to create "
                  << "temporary sample directory" << std::endl;
        m_sampleDir = "";
        return "";
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));

        if (!file.copy(QDir(m_sampleDir).filePath(fileName))) {
            std::cerr << "WARNING: AudioGenerator::getSampleDir: "
                      << "Unable to copy " << fileName.toStdString()
                      << " into temporary directory \""
                      << m_sampleDir.toStdString() << "\"" << std::endl;
        }
    }

    return m_sampleDir;
}

void
AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
{
    plugin->configure("sampledir", getSampleDir().toStdString());
}

RealTimePluginInstance *
AudioGenerator::loadPluginFor(const Model *model)
{
    QString pluginId, configurationXml;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (parameters) {
        pluginId = parameters->getPlayPluginId();
        configurationXml = parameters->getPlayPluginConfiguration();
    }

    if (pluginId == "") {
        pluginId = getDefaultPlayPluginId(model);
        configurationXml = getDefaultPlayPluginConfiguration(model);
    }

    if (pluginId == "") return 0;

    RealTimePluginInstance *plugin = loadPlugin(pluginId, "");
    if (!plugin) return 0;

    if (configurationXml != "") {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }

    if (parameters) {
        parameters->setPlayPluginId(pluginId);
        parameters->setPlayPluginConfiguration(configurationXml);
    }

    return plugin;
}

RealTimePluginInstance *
AudioGenerator::loadPlugin(QString pluginId, QString program)
{
    RealTimePluginFactory *factory =
        RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
        std::cerr << "Failed to get plugin factory" << std::endl;
        return 0;
    }

    RealTimePluginInstance *instance =
        factory->instantiatePlugin
        (pluginId, 0, 0, m_sourceSampleRate, m_pluginBlockSize, m_targetChannelCount);

    if (!instance) {
        std::cerr << "Failed to instantiate plugin " << pluginId.toStdString() << std::endl;
        return 0;
    }

    setSampleDir(instance);

    for (unsigned int i = 0; i < instance->getParameterCount(); ++i) {
        instance->setParameterValue(i, instance->getParameterDefault(i));
    }
    std::string defaultProgram = instance->getProgram(0, 0);
    if (defaultProgram != "") {
//        std::cerr << "first selecting default program " << defaultProgram << std::endl;
        instance->selectProgram(defaultProgram);
    }
    if (program != "") {
//        std::cerr << "now selecting desired program " << program.toStdString() << std::endl;
        instance->selectProgram(program.toStdString());
    }
    instance->setIdealChannelCount(m_targetChannelCount); // reset!

    return instance;
}

void
AudioGenerator::removeModel(Model *model)
{
    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (!sodm) return; // nothing to do

    QMutexLocker locker(&m_mutex);

    if (m_synthMap.find(sodm) == m_synthMap.end()) return;

    RealTimePluginInstance *instance = m_synthMap[sodm];
    m_synthMap.erase(sodm);
    delete instance;
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);
    while (!m_synthMap.empty()) {
        RealTimePluginInstance *instance = m_synthMap.begin()->second;
        m_synthMap.erase(m_synthMap.begin());
        delete instance;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);
    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) {
            i->second->silence();
            i->second->discardEvents();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

//    std::cerr << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << std::endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) i->second->setIdealChannelCount(targetChannelCount);
    }
}

size_t
AudioGenerator::getBlockSize() const
{
    return m_pluginBlockSize;
}
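
// A possible caller-side sketch, not part of this file: the "hang on" notes
// in the mix functions below suggest that whoever drives mixModel() should
// query getBlockSize() and only request whole multiples of it.  The names
// here (generator, framesWanted, buffers, etc.) are illustrative only:
//
//     size_t blockSize = generator.getBlockSize();
//     size_t request = (framesWanted / blockSize) * blockSize;
//     if (request > 0) {
//         generator.mixModel(model, startFrame, request,
//                            buffers, fadeIn, fadeOut);
//     }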

size_t
AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
                         float **buffer, size_t fadeIn, size_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        std::cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << std::endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) return frameCount;

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    SparseOneDimensionalModel *sodm = dynamic_cast<SparseOneDimensionalModel *>
        (model);
    if (sodm) {
        return mixSparseOneDimensionalModel(sodm, startFrame, frameCount,
                                            buffer, gain, pan, fadeIn, fadeOut);
    }

    NoteModel *nm = dynamic_cast<NoteModel *>(model);
    if (nm) {
        return mixNoteModel(nm, startFrame, frameCount,
                            buffer, gain, pan, fadeIn, fadeOut);
    }

    return frameCount;
}

size_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       size_t startFrame, size_t frames,
                                       float **buffer, float gain, float pan,
                                       size_t fadeIn, size_t fadeOut)
{
    static float *channelBuffer = 0;
    static size_t channelBufSiz = 0;

    size_t totalFrames = frames + fadeIn/2 + fadeOut/2;

    if (channelBufSiz < totalFrames) {
        delete[] channelBuffer;
        channelBuffer = new float[totalFrames];
        channelBufSiz = totalFrames;
    }

    size_t got = 0;
    size_t prevChannel = 999;

    for (size_t c = 0; c < m_targetChannelCount; ++c) {

        size_t sourceChannel = (c % dtvm->getChannelCount());

//        std::cerr << "mixing channel " << c << " from source channel " << sourceChannel << std::endl;

        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        if (prevChannel != sourceChannel) {
            if (startFrame >= fadeIn/2) {
                got = dtvm->getValues
                    (sourceChannel,
                     startFrame - fadeIn/2, startFrame + frames + fadeOut/2,
                     channelBuffer);
            } else {
                size_t missing = fadeIn/2 - startFrame;
                got = dtvm->getValues
                    (sourceChannel,
                     0, startFrame + frames + fadeOut/2,
                     channelBuffer + missing);
            }
        }
        prevChannel = sourceChannel;

        for (size_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * channelBuffer[i] * i) / fadeIn;
        }

        for (size_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            buffer[c][i] += mult * channelBuffer[i];
        }
    }

    return got;
}
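
// Illustration of the pan scaling used above (and repeated in the two mix
// functions below), assuming channel 0 is treated as the left channel: with
// pan = +0.5 the left channel is scaled by 1 - 0.5 = 0.5 and the right is
// left at full gain; with pan = -1.0 the right channel is scaled by
// -1.0 + 1 = 0 and only the left channel is heard.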

size_t
AudioGenerator::mixSparseOneDimensionalModel(SparseOneDimensionalModel *sodm,
                                             size_t startFrame, size_t frames,
                                             float **buffer, float gain, float pan,
                                             size_t /* fadeIn */,
                                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[sodm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size).  how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size?  I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [sparse]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 127;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[sodm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        SparseOneDimensionalModel::PointList points =
            sodm->getPoints(reqStart + latency,
                            reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (SparseOneDimensionalModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [sparse]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = 7000; // frames [for now]
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [sparse]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}
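
// Rough timeline of the loop above, for illustration (44100 Hz is assumed
// here purely for the arithmetic): each point in the sparse model becomes a
// NOTEON for MIDI note 64 at velocity 127, latency-compensated to pliFrame,
// and a matching NOTEOFF is queued 7000 frames later -- roughly 0.16 s at
// 44.1 kHz.  Any queued note-offs falling within the current plugin block
// are flushed before the block is run.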


//!!! mucho duplication with above -- refactor
size_t
AudioGenerator::mixNoteModel(NoteModel *nm,
                             size_t startFrame, size_t frames,
                             float **buffer, float gain, float pan,
                             size_t /* fadeIn */,
                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[nm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size).  how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size?  I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [note]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 127;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[nm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        NoteModel::PointList points =
            nm->getPoints(reqStart + latency,
                          reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (NoteModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [note]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            if (nm->getScaleUnits() == "Hz") {
                onEv.data.note.note = Pitch::getPitchForFrequency(pli->value);
            } else {
                onEv.data.note.note = lrintf(pli->value);
            }

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = pli->duration;
            if (duration == 0 || duration == 1) {
                duration = m_sourceSampleRate / 20;
            }
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [note]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}