comparison: audioio/AudioGenerator.cpp @ 450:d9d132c0e240, branch alignment_view (svapp repository)
Merge from default branch
author | Chris Cannam |
---|---|
date | Mon, 20 Apr 2015 09:21:32 +0100 |
parents | c48bc6ddfe17 |
children | 3485d324c172 |
430:adfb2948fabf | 450:d9d132c0e240 |
---|---|
35 #include <cmath> | 35 #include <cmath> |
36 | 36 |
37 #include <QDir> | 37 #include <QDir> |
38 #include <QFile> | 38 #include <QFile> |
39 | 39 |
40 const int | 40 const sv_frame_t |
41 AudioGenerator::m_processingBlockSize = 1024; | 41 AudioGenerator::m_processingBlockSize = 1024; |
42 | 42 |
43 QString | 43 QString |
44 AudioGenerator::m_sampleDir = ""; | 44 AudioGenerator::m_sampleDir = ""; |
45 | 45 |
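The dominant change in this hunk, and in most of the hunks below, is the switch from plain `int` to `sv_frame_t` for block sizes and frame counts. A minimal self-contained sketch of why that matters, assuming `sv_frame_t` is a 64-bit signed typedef in the project's base headers (its actual definition is not part of this diff):

```cpp
#include <cstdint>
#include <cstdio>

// Assumption: sv_frame_t is a wide signed integer, e.g. int64_t.
typedef int64_t sv_frame_t;

int main() {
    const sv_frame_t blockSize = 1024;
    const sv_frame_t sampleRate = 44100;

    // A 32-bit int overflows after roughly 13.5 hours' worth of frames at
    // 44.1 kHz; a 64-bit frame type keeps long sessions safe.
    const sv_frame_t thirtyHours = 30LL * 60 * 60 * sampleRate;
    std::printf("frames in 30 hours: %lld (blocks: %lld)\n",
                (long long)thirtyHours, (long long)(thirtyHours / blockSize));
    return 0;
}
```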
122 m_sourceSampleRate = model->getSampleRate(); | 122 m_sourceSampleRate = model->getSampleRate(); |
123 return true; | 123 return true; |
124 } | 124 } |
125 } | 125 } |
126 | 126 |
| 127 const Playable *playable = model; |
| 128 if (!playable || !playable->canPlay()) return 0; |
| 129 |
| 130 PlayParameters *parameters = |
| 131 PlayParameterRepository::getInstance()->getPlayParameters(playable); |
| 132 |
| 133 bool willPlay = !parameters->isPlayMuted(); |
| 134 |
127 if (usesClipMixer(model)) { | 135 if (usesClipMixer(model)) { |
128 ClipMixer *mixer = makeClipMixerFor(model); | 136 ClipMixer *mixer = makeClipMixerFor(model); |
129 if (mixer) { | 137 if (mixer) { |
130 QMutexLocker locker(&m_mutex); | 138 QMutexLocker locker(&m_mutex); |
131 m_clipMixerMap[model] = mixer; | 139 m_clipMixerMap[model] = mixer; |
132 return true; | 140 return willPlay; |
133 } | 141 } |
134 } | 142 } |
135 | 143 |
136 if (usesContinuousSynth(model)) { | 144 if (usesContinuousSynth(model)) { |
137 ContinuousSynth *synth = makeSynthFor(model); | 145 ContinuousSynth *synth = makeSynthFor(model); |
138 if (synth) { | 146 if (synth) { |
139 QMutexLocker locker(&m_mutex); | 147 QMutexLocker locker(&m_mutex); |
140 m_continuousSynthMap[model] = synth; | 148 m_continuousSynthMap[model] = synth; |
141 return true; | 149 return willPlay; |
142 } | 150 } |
143 } | 151 } |
144 | 152 |
145 return false; | 153 return false; |
146 } | 154 } |
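The inserted lines 127-134 on the new side, together with the changed returns at 140 and 149, alter what this function reports: a clip mixer or synth is still created and registered for a muted model, but the caller is now told whether the model will actually be heard rather than whether setup succeeded. A self-contained mock of that behaviour (the names here are illustrative, only the mute check mirrors the diff):

```cpp
#include <iostream>

// Mock of the changed contract: the generator still registers a mixer for a
// muted model, but reports back whether the model will currently be audible.
struct MockParams { bool muted; };

bool addModelSketch(bool canPlay, const MockParams &params, bool &mixerCreated) {
    if (!canPlay) return false;
    bool willPlay = !params.muted;   // new-side line 133 in the diff
    mixerCreated = true;             // mixer/synth is set up regardless of mute
    return willPlay;                 // was "return true" before this change
}

int main() {
    bool created = false;
    bool audible = addModelSketch(true, MockParams{true}, created);
    std::cout << "created=" << created << " audible=" << audible << std::endl;
    // prints: created=1 audible=0
    return 0;
}
```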
207 PlayParameterRepository::getInstance()->getPlayParameters(playable); | 215 PlayParameterRepository::getInstance()->getPlayParameters(playable); |
208 if (parameters) { | 216 if (parameters) { |
209 clipId = parameters->getPlayClipId(); | 217 clipId = parameters->getPlayClipId(); |
210 } | 218 } |
211 | 219 |
| 220 #ifdef DEBUG_AUDIO_GENERATOR |
212 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl; | 221 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl; |
| 222 #endif |
213 | 223 |
214 if (clipId == "") { | 224 if (clipId == "") { |
215 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl; | 225 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl; |
216 return 0; | 226 return 0; |
217 } | 227 } |
218 | 228 |
219 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, | 229 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, |
220 m_sourceSampleRate, | 230 m_sourceSampleRate, |
221 m_processingBlockSize); | 231 m_processingBlockSize); |
222 | 232 |
223 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required | 233 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required |
224 | 234 |
225 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); | 235 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); |
226 | 236 |
227 float level = wantsQuieterClips(model) ? 0.5 : 1.0; | 237 double level = wantsQuieterClips(model) ? 0.5 : 1.0; |
228 if (!mixer->loadClipData(clipPath, clipF0, level)) { | 238 if (!mixer->loadClipData(clipPath, clipF0, level)) { |
229 delete mixer; | 239 delete mixer; |
230 return 0; | 240 return 0; |
231 } | 241 } |
232 | 242 |
| 243 #ifdef DEBUG_AUDIO_GENERATOR |
233 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl; | 244 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl; |
| 245 #endif |
234 | 246 |
235 return mixer; | 247 return mixer; |
236 } | 248 } |
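For reference, the `clipF0` requested above is the frequency of MIDI pitch 60 (middle C) at concert A = 440 Hz, roughly 261.63 Hz; it tells the `ClipMixer` what pitch the loaded clip sample is assumed to be recorded at. A self-contained sketch of the equal-tempered mapping that `Pitch::getFrequencyForPitch` is presumed to implement:

```cpp
#include <cmath>
#include <cstdio>

// Assumed formula: f = concertA * 2^((midiPitch - 69 + cents/100) / 12),
// with MIDI pitch 69 defined as concert A.
double frequencyForPitch(int midiPitch, double cents, double concertA) {
    return concertA * std::pow(2.0, (midiPitch - 69 + cents / 100.0) / 12.0);
}

int main() {
    // Pitch 60, no cent offset, A4 = 440 Hz -> ~261.63 Hz (middle C).
    std::printf("clipF0 = %.2f Hz\n", frequencyForPitch(60, 0.0, 440.0));
    return 0;
}
```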
237 | 249 |
238 ContinuousSynth * | 250 ContinuousSynth * |
244 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount, | 256 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount, |
245 m_sourceSampleRate, | 257 m_sourceSampleRate, |
246 m_processingBlockSize, | 258 m_processingBlockSize, |
247 m_waveType); | 259 m_waveType); |
248 | 260 |
| 261 #ifdef DEBUG_AUDIO_GENERATOR |
249 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl; | 262 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl; |
| 263 #endif |
250 | 264 |
251 return synth; | 265 return synth; |
252 } | 266 } |
253 | 267 |
254 void | 268 void |
282 void | 296 void |
283 AudioGenerator::reset() | 297 AudioGenerator::reset() |
284 { | 298 { |
285 QMutexLocker locker(&m_mutex); | 299 QMutexLocker locker(&m_mutex); |
286 | 300 |
| 301 #ifdef DEBUG_AUDIO_GENERATOR |
287 cerr << "AudioGenerator::reset()" << endl; | 302 cerr << "AudioGenerator::reset()" << endl; |
| 303 #endif |
288 | 304 |
289 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { | 305 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { |
290 if (i->second) { | 306 if (i->second) { |
291 i->second->reset(); | 307 i->second->reset(); |
292 } | 308 } |
308 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { | 324 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { |
309 if (i->second) i->second->setChannelCount(targetChannelCount); | 325 if (i->second) i->second->setChannelCount(targetChannelCount); |
310 } | 326 } |
311 } | 327 } |
312 | 328 |
313 int | 329 sv_frame_t |
314 AudioGenerator::getBlockSize() const | 330 AudioGenerator::getBlockSize() const |
315 { | 331 { |
316 return m_processingBlockSize; | 332 return m_processingBlockSize; |
317 } | 333 } |
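As the TODO in `mixClipModel` further down notes, the audio callback play source is expected to request frame counts that are a whole multiple of this block size, since both clip and synth mixing below simply drop any trailing partial block. A small illustration of that contract, with hypothetical caller-side names:

```cpp
#include <cstdint>
typedef int64_t sv_frame_t;   // assumption, as above

// Hypothetical caller-side rounding: request whole processing blocks only,
// because mixClipModel/mixContinuousSynthModel ignore a trailing partial block.
sv_frame_t roundToBlocks(sv_frame_t wanted, sv_frame_t blockSize) {
    return (wanted / blockSize) * blockSize;
}
// e.g. roundToBlocks(2500, 1024) == 2048 -- the last 452 frames would wait
// for the next callback rather than being mixed as a partial block.
```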
318 | 334 |
332 | 348 |
333 m_soloModelSet.clear(); | 349 m_soloModelSet.clear(); |
334 m_soloing = false; | 350 m_soloing = false; |
335 } | 351 } |
336 | 352 |
337 int | 353 sv_frame_t |
338 AudioGenerator::mixModel(Model *model, int startFrame, int frameCount, | 354 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, |
339 float **buffer, int fadeIn, int fadeOut) | 355 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut) |
340 { | 356 { |
341 if (m_sourceSampleRate == 0) { | 357 if (m_sourceSampleRate == 0) { |
342 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; | 358 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; |
343 return frameCount; | 359 return frameCount; |
344 } | 360 } |
391 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; | 407 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; |
392 | 408 |
393 return frameCount; | 409 return frameCount; |
394 } | 410 } |
395 | 411 |
396 int | 412 sv_frame_t |
397 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, | 413 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, |
398 int startFrame, int frames, | 414 sv_frame_t startFrame, sv_frame_t frames, |
399 float **buffer, float gain, float pan, | 415 float **buffer, float gain, float pan, |
400 int fadeIn, int fadeOut) | 416 sv_frame_t fadeIn, sv_frame_t fadeOut) |
401 { | 417 { |
402 int maxFrames = frames + std::max(fadeIn, fadeOut); | 418 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut); |
403 | 419 |
404 int modelChannels = dtvm->getChannelCount(); | 420 int modelChannels = dtvm->getChannelCount(); |
405 | 421 |
406 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { | 422 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { |
407 | 423 |
418 | 434 |
419 m_channelBufCount = modelChannels; | 435 m_channelBufCount = modelChannels; |
420 m_channelBufSiz = maxFrames; | 436 m_channelBufSiz = maxFrames; |
421 } | 437 } |
422 | 438 |
423 int got = 0; | 439 sv_frame_t got = 0; |
424 | 440 |
425 if (startFrame >= fadeIn/2) { | 441 if (startFrame >= fadeIn/2) { |
426 got = dtvm->getData(0, modelChannels - 1, | 442 got = dtvm->getData(0, modelChannels - 1, |
427 startFrame - fadeIn/2, | 443 startFrame - fadeIn/2, |
428 frames + fadeOut/2 + fadeIn/2, | 444 frames + fadeOut/2 + fadeIn/2, |
429 m_channelBuffer); | 445 m_channelBuffer); |
430 } else { | 446 } else { |
431 int missing = fadeIn/2 - startFrame; | 447 sv_frame_t missing = fadeIn/2 - startFrame; |
432 | 448 |
433 for (int c = 0; c < modelChannels; ++c) { | 449 for (int c = 0; c < modelChannels; ++c) { |
434 m_channelBuffer[c] += missing; | 450 m_channelBuffer[c] += missing; |
435 } | 451 } |
436 | 452 |
460 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; | 476 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; |
461 | 477 |
462 float channelGain = gain; | 478 float channelGain = gain; |
463 if (pan != 0.0) { | 479 if (pan != 0.0) { |
464 if (c == 0) { | 480 if (c == 0) { |
465 if (pan > 0.0) channelGain *= 1.0 - pan; | 481 if (pan > 0.0) channelGain *= 1.0f - pan; |
466 } else { | 482 } else { |
467 if (pan < 0.0) channelGain *= pan + 1.0; | 483 if (pan < 0.0) channelGain *= pan + 1.0f; |
468 } | 484 } |
469 } | 485 } |
470 | 486 |
471 for (int i = 0; i < fadeIn/2; ++i) { | 487 for (sv_frame_t i = 0; i < fadeIn/2; ++i) { |
472 float *back = buffer[c]; | 488 float *back = buffer[c]; |
473 back -= fadeIn/2; | 489 back -= fadeIn/2; |
474 back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn; | 490 back[i] += |
| 491 (channelGain * m_channelBuffer[sourceChannel][i] * float(i)) |
| 492 / float(fadeIn); |
475 } | 493 } |
476 | 494 |
477 for (int i = 0; i < frames + fadeOut/2; ++i) { | 495 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) { |
478 float mult = channelGain; | 496 float mult = channelGain; |
479 if (i < fadeIn/2) { | 497 if (i < fadeIn/2) { |
480 mult = (mult * i) / fadeIn; | 498 mult = (mult * float(i)) / float(fadeIn); |
481 } | 499 } |
482 if (i > frames - fadeOut/2) { | 500 if (i > frames - fadeOut/2) { |
483 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut; | 501 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut); |
484 } | 502 } |
485 float val = m_channelBuffer[sourceChannel][i]; | 503 float val = m_channelBuffer[sourceChannel][i]; |
486 if (i >= got) val = 0.f; | 504 if (i >= got) val = 0.f; |
487 buffer[c][i] += mult * val; | 505 buffer[c][i] += mult * val; |
488 } | 506 } |
489 } | 507 } |
490 | 508 |
491 return got; | 509 return got; |
492 } | 510 } |
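Two pieces of arithmetic in the loop above are worth spelling out: the pan acts as a linear balance control that only attenuates the channel opposite the pan direction, and the fade-in/fade-out multipliers are straight linear ramps over `fadeIn` and `fadeOut` frames. A self-contained restatement (the helper names are illustrative, not the class's own):

```cpp
#include <cstdio>

// Linear balance: positive pan attenuates channel 0 (left), negative pan
// attenuates channel 1 (right); the favoured channel keeps its full gain.
float channelGainFor(float gain, float pan, int channel) {
    if (channel == 0 && pan > 0.f) return gain * (1.f - pan);
    if (channel == 1 && pan < 0.f) return gain * (1.f + pan);
    return gain;
}

// Linear fade ramps as in the mixing loop: ramp up over the first fadeIn/2
// frames of this buffer, ramp down over the last fadeOut/2 frames.
float fadeMultiplier(float gain, long i, long frames, long fadeIn, long fadeOut) {
    float mult = gain;
    if (i < fadeIn / 2) mult = (mult * float(i)) / float(fadeIn);
    if (i > frames - fadeOut / 2) {
        mult = (mult * float((frames + fadeOut / 2) - i)) / float(fadeOut);
    }
    return mult;
}

int main() {
    std::printf("%.2f\n", channelGainFor(1.0f, 0.5f, 0));               // 0.50
    std::printf("%.2f\n", fadeMultiplier(1.0f, 256, 4096, 1024, 1024)); // 0.25
    return 0;
}
```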
493 | 511 |
494 int | 512 sv_frame_t |
495 AudioGenerator::mixClipModel(Model *model, | 513 AudioGenerator::mixClipModel(Model *model, |
496 int startFrame, int frames, | 514 sv_frame_t startFrame, sv_frame_t frames, |
497 float **buffer, float gain, float pan) | 515 float **buffer, float gain, float pan) |
498 { | 516 { |
499 ClipMixer *clipMixer = m_clipMixerMap[model]; | 517 ClipMixer *clipMixer = m_clipMixerMap[model]; |
500 if (!clipMixer) return 0; | 518 if (!clipMixer) return 0; |
501 | 519 |
502 int blocks = frames / m_processingBlockSize; | 520 int blocks = int(frames / m_processingBlockSize); |
503 | 521 |
504 //!!! todo: the below -- it matters | 522 //!!! todo: the below -- it matters |
505 | 523 |
506 //!!! hang on -- the fact that the audio callback play source's | 524 //!!! hang on -- the fact that the audio callback play source's |
507 //buffer is a multiple of the plugin's buffer size doesn't mean | 525 //buffer is a multiple of the plugin's buffer size doesn't mean |
511 //always have a multiple of the plugin buffer size? I guess this | 529 //always have a multiple of the plugin buffer size? I guess this |
512 //class has to be queryable for the plugin buffer size & the | 530 //class has to be queryable for the plugin buffer size & the |
513 //callback play source has to use that as a multiple for all the | 531 //callback play source has to use that as a multiple for all the |
514 //calls to mixModel | 532 //calls to mixModel |
515 | 533 |
516 int got = blocks * m_processingBlockSize; | 534 sv_frame_t got = blocks * m_processingBlockSize; |
517 | 535 |
518 #ifdef DEBUG_AUDIO_GENERATOR | 536 #ifdef DEBUG_AUDIO_GENERATOR |
519 cout << "mixModel [clip]: frames " << frames | 537 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames |
520 << ", blocks " << blocks << endl; | 538 << ", blocks " << blocks << ", have " << m_noteOffs.size() |
| 539 << " note-offs" << endl; |
521 #endif | 540 #endif |
522 | 541 |
523 ClipMixer::NoteStart on; | 542 ClipMixer::NoteStart on; |
524 ClipMixer::NoteEnd off; | 543 ClipMixer::NoteEnd off; |
525 | 544 |
527 | 546 |
528 float **bufferIndexes = new float *[m_targetChannelCount]; | 547 float **bufferIndexes = new float *[m_targetChannelCount]; |
529 | 548 |
530 for (int i = 0; i < blocks; ++i) { | 549 for (int i = 0; i < blocks; ++i) { |
531 | 550 |
532 int reqStart = startFrame + i * m_processingBlockSize; | 551 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
533 | 552 |
534 NoteList notes; | 553 NoteList notes; |
535 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); | 554 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); |
536 if (exportable) { | 555 if (exportable) { |
537 notes = exportable->getNotesWithin(reqStart, | 556 notes = exportable->getNotesWithin(reqStart, |
542 std::vector<ClipMixer::NoteEnd> ends; | 561 std::vector<ClipMixer::NoteEnd> ends; |
543 | 562 |
544 for (NoteList::const_iterator ni = notes.begin(); | 563 for (NoteList::const_iterator ni = notes.begin(); |
545 ni != notes.end(); ++ni) { | 564 ni != notes.end(); ++ni) { |
546 | 565 |
547 int noteFrame = ni->start; | 566 sv_frame_t noteFrame = ni->start; |
548 | 567 |
549 if (noteFrame < reqStart || | 568 if (noteFrame < reqStart || |
550 noteFrame >= reqStart + m_processingBlockSize) continue; | 569 noteFrame >= reqStart + m_processingBlockSize) continue; |
551 | 570 |
552 while (noteOffs.begin() != noteOffs.end() && | 571 while (noteOffs.begin() != noteOffs.end() && |
553 noteOffs.begin()->frame <= noteFrame) { | 572 noteOffs.begin()->frame <= noteFrame) { |
554 | 573 |
555 int eventFrame = noteOffs.begin()->frame; | 574 sv_frame_t eventFrame = noteOffs.begin()->frame; |
556 if (eventFrame < reqStart) eventFrame = reqStart; | 575 if (eventFrame < reqStart) eventFrame = reqStart; |
557 | 576 |
558 off.frameOffset = eventFrame - reqStart; | 577 off.frameOffset = eventFrame - reqStart; |
559 off.frequency = noteOffs.begin()->frequency; | 578 off.frequency = noteOffs.begin()->frequency; |
560 | 579 |
566 noteOffs.erase(noteOffs.begin()); | 585 noteOffs.erase(noteOffs.begin()); |
567 } | 586 } |
568 | 587 |
569 on.frameOffset = noteFrame - reqStart; | 588 on.frameOffset = noteFrame - reqStart; |
570 on.frequency = ni->getFrequency(); | 589 on.frequency = ni->getFrequency(); |
571 on.level = float(ni->velocity) / 127.0; | 590 on.level = float(ni->velocity) / 127.0f; |
572 on.pan = pan; | 591 on.pan = pan; |
573 | 592 |
574 #ifdef DEBUG_AUDIO_GENERATOR | 593 #ifdef DEBUG_AUDIO_GENERATOR |
575 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; | 594 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; |
576 #endif | 595 #endif |
581 } | 600 } |
582 | 601 |
583 while (noteOffs.begin() != noteOffs.end() && | 602 while (noteOffs.begin() != noteOffs.end() && |
584 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { | 603 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { |
585 | 604 |
586 int eventFrame = noteOffs.begin()->frame; | 605 sv_frame_t eventFrame = noteOffs.begin()->frame; |
587 if (eventFrame < reqStart) eventFrame = reqStart; | 606 if (eventFrame < reqStart) eventFrame = reqStart; |
588 | 607 |
589 off.frameOffset = eventFrame - reqStart; | 608 off.frameOffset = eventFrame - reqStart; |
590 off.frequency = noteOffs.begin()->frequency; | 609 off.frequency = noteOffs.begin()->frequency; |
591 | 610 |
607 delete[] bufferIndexes; | 626 delete[] bufferIndexes; |
608 | 627 |
609 return got; | 628 return got; |
610 } | 629 } |
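The note-on events built in this function map a note's MIDI-style velocity (0-127) linearly onto a playback level in 0-1, and express the note's start as a frame offset from the beginning of the current processing block. A small self-contained restatement (the struct here is an illustrative stand-in, not ClipMixer's real NoteStart):

```cpp
#include <cstdint>
#include <cstdio>
typedef int64_t sv_frame_t;   // assumption, as above

// Illustrative stand-in for ClipMixer::NoteStart.
struct NoteOnSketch {
    sv_frame_t frameOffset;   // note start relative to the current block
    float frequency;
    float level;              // 0..1
    float pan;
};

NoteOnSketch makeNoteOn(sv_frame_t noteFrame, sv_frame_t blockStart,
                        float frequency, int velocity, float pan) {
    NoteOnSketch on;
    on.frameOffset = noteFrame - blockStart;    // 0 .. blockSize-1 by construction
    on.frequency   = frequency;
    on.level       = float(velocity) / 127.0f;  // velocity 127 -> full level
    on.pan         = pan;
    return on;
}

int main() {
    NoteOnSketch on = makeNoteOn(44544, 44032, 440.0f, 64, 0.0f);
    std::printf("offset=%lld level=%.3f\n", (long long)on.frameOffset, on.level);
    // prints: offset=512 level=0.504
    return 0;
}
```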
611 | 630 |
612 int | 631 sv_frame_t |
613 AudioGenerator::mixContinuousSynthModel(Model *model, | 632 AudioGenerator::mixContinuousSynthModel(Model *model, |
614 int startFrame, | 633 sv_frame_t startFrame, |
615 int frames, | 634 sv_frame_t frames, |
616 float **buffer, | 635 float **buffer, |
617 float gain, | 636 float gain, |
618 float pan) | 637 float pan) |
619 { | 638 { |
620 ContinuousSynth *synth = m_continuousSynthMap[model]; | 639 ContinuousSynth *synth = m_continuousSynthMap[model]; |
622 | 641 |
623 // only type we support here at the moment | 642 // only type we support here at the moment |
624 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); | 643 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); |
625 if (stvm->getScaleUnits() != "Hz") return 0; | 644 if (stvm->getScaleUnits() != "Hz") return 0; |
626 | 645 |
627 int blocks = frames / m_processingBlockSize; | 646 int blocks = int(frames / m_processingBlockSize); |
628 | 647 |
629 //!!! todo: see comment in mixClipModel | 648 //!!! todo: see comment in mixClipModel |
630 | 649 |
631 int got = blocks * m_processingBlockSize; | 650 sv_frame_t got = blocks * m_processingBlockSize; |
632 | 651 |
633 #ifdef DEBUG_AUDIO_GENERATOR | 652 #ifdef DEBUG_AUDIO_GENERATOR |
634 cout << "mixModel [synth]: frames " << frames | 653 cout << "mixModel [synth]: frames " << frames |
635 << ", blocks " << blocks << endl; | 654 << ", blocks " << blocks << endl; |
636 #endif | 655 #endif |
637 | 656 |
638 float **bufferIndexes = new float *[m_targetChannelCount]; | 657 float **bufferIndexes = new float *[m_targetChannelCount]; |
639 | 658 |
640 for (int i = 0; i < blocks; ++i) { | 659 for (int i = 0; i < blocks; ++i) { |
641 | 660 |
642 int reqStart = startFrame + i * m_processingBlockSize; | 661 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
643 | 662 |
644 for (int c = 0; c < m_targetChannelCount; ++c) { | 663 for (int c = 0; c < m_targetChannelCount; ++c) { |
645 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; | 664 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; |
646 } | 665 } |
647 | 666 |