comparison audioio/AudioGenerator.cpp @ 43:3c5756fb6a68
Move some things around to facilitate plundering libraries for other
applications without needing to duplicate so much code.

    sv/osc -> data/osc
    sv/audioio -> audioio
    sv/transform -> plugin/transform
    sv/document -> document (will rename to framework in next commit)
| author | Chris Cannam |
|---|---|
| date | Wed, 24 Oct 2007 16:34:31 +0000 |
| parents | |
| children | 215b8b1b0308 89a689720ee9 |
comparison of 42:0619006a1ee3 with 43:3c5756fb6a68
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "data/model/NoteModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/SparseOneDimensionalModel.h"

#include "plugin/RealTimePluginFactory.h"
#include "plugin/RealTimePluginInstance.h"
#include "plugin/PluginIdentifier.h"
#include "plugin/PluginXml.h"
#include "plugin/api/alsa/seq_event.h"

#include <iostream>
#include <math.h>

#include <QDir>
#include <QFile>

const size_t
AudioGenerator::m_pluginBlockSize = 2048;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_soloing(false)
{
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginIdChanged(const Model *, QString)),
            this,
            SLOT(playPluginIdChanged(const Model *, QString)));

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginConfigurationChanged(const Model *, QString)),
            this,
            SLOT(playPluginConfigurationChanged(const Model *, QString)));
}

AudioGenerator::~AudioGenerator()
{
}

bool
AudioGenerator::canPlay(const Model *model)
{
    if (dynamic_cast<const DenseTimeValueModel *>(model) ||
        dynamic_cast<const SparseOneDimensionalModel *>(model) ||
        dynamic_cast<const NoteModel *>(model)) {
        return true;
    } else {
        return false;
    }
}

bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        m_synthMap[model] = plugin;
        return true;
    }

    return false;
}

void
AudioGenerator::playPluginIdChanged(const Model *model, QString)
{
    if (m_synthMap.find(model) == m_synthMap.end()) return;

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        delete m_synthMap[model];
        m_synthMap[model] = plugin;
    }
}

void
AudioGenerator::playPluginConfigurationChanged(const Model *model,
                                               QString configurationXml)
{
//    std::cerr << "AudioGenerator::playPluginConfigurationChanged" << std::endl;

    if (m_synthMap.find(model) == m_synthMap.end()) {
        std::cerr << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << std::endl;
        return;
    }

    RealTimePluginInstance *plugin = m_synthMap[model];
    if (plugin) {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }
}

QString
AudioGenerator::getDefaultPlayPluginId(const Model *model)
{
    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    return "";
}

QString
AudioGenerator::getDefaultPlayPluginConfiguration(const Model *model)
{
    QString program = "";

    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        program = "tap";
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        program = "piano";
    }

    if (program == "") return "";

    return
        QString("<plugin configuration=\"%1\" program=\"%2\"/>")
        .arg(XmlExportable::encodeEntities
             (QString("sampledir=%1")
              .arg(PluginXml::encodeConfigurationChars(getSampleDir()))))
        .arg(XmlExportable::encodeEntities(program));
}

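// Copy the *.wav files bundled in the ":/samples" Qt resource into a
// temporary directory on disk, so that the sample_player plugin (which
// reads its samples from the filesystem) can load them.  The directory
// is created only once and cached in m_sampleDir.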
QString
AudioGenerator::getSampleDir()
{
    if (m_sampleDir != "") return m_sampleDir;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (DirectoryCreationFailed f) {
        std::cerr << "WARNING: AudioGenerator::getSampleDir: Failed to create "
                  << "temporary sample directory" << std::endl;
        m_sampleDir = "";
        return "";
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));

        if (!file.copy(QDir(m_sampleDir).filePath(fileName))) {
            std::cerr << "WARNING: AudioGenerator::getSampleDir: "
                      << "Unable to copy " << fileName.toStdString()
                      << " into temporary directory \""
                      << m_sampleDir.toStdString() << "\"" << std::endl;
        }
    }

    return m_sampleDir;
}

void
AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
{
    plugin->configure("sampledir", getSampleDir().toStdString());
}

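// Select and instantiate a synth plugin for the given model.  A plugin
// id and configuration stored in the model's play parameters take
// precedence; otherwise the defaults for the model type are used.  The
// resolved values are written back to the play parameters.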
RealTimePluginInstance *
AudioGenerator::loadPluginFor(const Model *model)
{
    QString pluginId, configurationXml;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (parameters) {
        pluginId = parameters->getPlayPluginId();
        configurationXml = parameters->getPlayPluginConfiguration();
    }

    if (pluginId == "") {
        pluginId = getDefaultPlayPluginId(model);
        configurationXml = getDefaultPlayPluginConfiguration(model);
    }

    if (pluginId == "") return 0;

    RealTimePluginInstance *plugin = loadPlugin(pluginId, "");
    if (!plugin) return 0;

    if (configurationXml != "") {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }

    if (parameters) {
        parameters->setPlayPluginId(pluginId);
        parameters->setPlayPluginConfiguration(configurationXml);
    }

    return plugin;
}

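// Instantiate a plugin through its RealTimePluginFactory, point it at
// the sample directory, reset its parameters to their defaults, select
// its default program followed by the requested one (if any), and make
// its ideal channel count match our target channel count.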
RealTimePluginInstance *
AudioGenerator::loadPlugin(QString pluginId, QString program)
{
    RealTimePluginFactory *factory =
        RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
        std::cerr << "Failed to get plugin factory" << std::endl;
        return 0;
    }

    RealTimePluginInstance *instance =
        factory->instantiatePlugin
        (pluginId, 0, 0, m_sourceSampleRate, m_pluginBlockSize, m_targetChannelCount);

    if (!instance) {
        std::cerr << "Failed to instantiate plugin " << pluginId.toStdString() << std::endl;
        return 0;
    }

    setSampleDir(instance);

    for (unsigned int i = 0; i < instance->getParameterCount(); ++i) {
        instance->setParameterValue(i, instance->getParameterDefault(i));
    }
    std::string defaultProgram = instance->getProgram(0, 0);
    if (defaultProgram != "") {
//        std::cerr << "first selecting default program " << defaultProgram << std::endl;
        instance->selectProgram(defaultProgram);
    }
    if (program != "") {
//        std::cerr << "now selecting desired program " << program.toStdString() << std::endl;
        instance->selectProgram(program.toStdString());
    }
    instance->setIdealChannelCount(m_targetChannelCount); // reset!

    return instance;
}

void
AudioGenerator::removeModel(Model *model)
{
    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (!sodm) return; // nothing to do

    QMutexLocker locker(&m_mutex);

    if (m_synthMap.find(sodm) == m_synthMap.end()) return;

    RealTimePluginInstance *instance = m_synthMap[sodm];
    m_synthMap.erase(sodm);
    delete instance;
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);
    while (!m_synthMap.empty()) {
        RealTimePluginInstance *instance = m_synthMap.begin()->second;
        m_synthMap.erase(m_synthMap.begin());
        delete instance;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);
    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) {
            i->second->silence();
            i->second->discardEvents();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

//    std::cerr << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << std::endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) i->second->setIdealChannelCount(targetChannelCount);
    }
}

size_t
AudioGenerator::getBlockSize() const
{
    return m_pluginBlockSize;
}

void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}

void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}

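// Mix a single model's contribution into the supplied buffers.  Muted
// models, and models outside the solo set while soloing, contribute
// nothing; otherwise we dispatch on the model type to one of the
// specialised mix methods below.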
size_t
AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
                         float **buffer, size_t fadeIn, size_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        std::cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << std::endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        std::cout << "AudioGenerator::mixModel(" << model << "): muted" << std::endl;
#endif
        return frameCount;
    }

    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << std::endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    SparseOneDimensionalModel *sodm = dynamic_cast<SparseOneDimensionalModel *>
        (model);
    if (sodm) {
        return mixSparseOneDimensionalModel(sodm, startFrame, frameCount,
                                            buffer, gain, pan, fadeIn, fadeOut);
    }

    NoteModel *nm = dynamic_cast<NoteModel *>(model);
    if (nm) {
        return mixNoteModel(nm, startFrame, frameCount,
                            buffer, gain, pan, fadeIn, fadeOut);
    }

    return frameCount;
}

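// Dense models are mixed by reading sample data directly from the model.
// An extra fadeIn/2 frames before and fadeOut/2 frames after the nominal
// block are fetched so that linear fade ramps can be applied at each end.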
size_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       size_t startFrame, size_t frames,
                                       float **buffer, float gain, float pan,
                                       size_t fadeIn, size_t fadeOut)
{
    static float *channelBuffer = 0;
    static size_t channelBufSiz = 0;

    size_t totalFrames = frames + fadeIn/2 + fadeOut/2;

    if (channelBufSiz < totalFrames) {
        delete[] channelBuffer;
        channelBuffer = new float[totalFrames];
        channelBufSiz = totalFrames;
    }

    size_t got = 0;
    size_t prevChannel = 999;

    for (size_t c = 0; c < m_targetChannelCount; ++c) {

        size_t sourceChannel = (c % dtvm->getChannelCount());

//        std::cerr << "mixing channel " << c << " from source channel " << sourceChannel << std::endl;

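        // Simple linear pan: a positive pan attenuates channel 0 (left),
        // a negative pan attenuates the other channels.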
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        if (prevChannel != sourceChannel) {
            if (startFrame >= fadeIn/2) {
                got = dtvm->getData
                    (sourceChannel,
                     startFrame - fadeIn/2,
                     frames + fadeOut/2 + fadeIn/2,
                     channelBuffer);
            } else {
                size_t missing = fadeIn/2 - startFrame;
                got = dtvm->getData
                    (sourceChannel,
                     startFrame,
                     frames + fadeOut/2,
                     channelBuffer + missing);
            }
        }
        prevChannel = sourceChannel;

        for (size_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * channelBuffer[i] * i) / fadeIn;
        }

        for (size_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            buffer[c][i] += mult * channelBuffer[i];
        }
    }

    return got;
}

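// Sparse one-dimensional models are rendered through the model's synth
// plugin: each point becomes a MIDI note-on at a fixed pitch of 64 and
// velocity 100, with the matching note-off scheduled 7000 frames later
// via the m_noteOffs set.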
size_t
AudioGenerator::mixSparseOneDimensionalModel(SparseOneDimensionalModel *sodm,
                                             size_t startFrame, size_t frames,
                                             float **buffer, float gain, float pan,
                                             size_t /* fadeIn */,
                                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[sodm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [sparse]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 100;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[sodm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        SparseOneDimensionalModel::PointList points =
            sodm->getPoints(reqStart + latency,
                            reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (SparseOneDimensionalModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [sparse]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = 7000; // frames [for now]
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [sparse]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}


//!!! mucho duplication with above -- refactor
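// As for the sparse case, except that pitch and duration come from the
// model: values in Hz are converted to MIDI pitch, other values are used
// as MIDI note numbers directly, and zero- or one-frame durations are
// stretched to a twentieth of a second.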
size_t
AudioGenerator::mixNoteModel(NoteModel *nm,
                             size_t startFrame, size_t frames,
                             float **buffer, float gain, float pan,
                             size_t /* fadeIn */,
                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[nm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [note]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 100;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[nm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        NoteModel::PointList points =
            nm->getPoints(reqStart + latency,
                          reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (NoteModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [note]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            if (nm->getScaleUnits() == "Hz") {
                onEv.data.note.note = Pitch::getPitchForFrequency(pli->value);
            } else {
                onEv.data.note.note = lrintf(pli->value);
            }

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = pli->duration;
            if (duration == 0 || duration == 1) {
                duration = m_sourceSampleRate / 20;
            }
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [note]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}