comparison audioio/AudioGenerator.cpp @ 441:aa6fb3516e28 tonioni
Merge from cxx11 branch
author | Chris Cannam |
---|---|
date | Mon, 23 Mar 2015 11:26:28 +0000 |
parents | 8d2112977aa0 72c662fe7ea3 |
children | 88ae0e53a5da |
440:2185d52b4758 | 441:aa6fb3516e28 |
---|---|
35 #include <cmath> | 35 #include <cmath> |
36 | 36 |
37 #include <QDir> | 37 #include <QDir> |
38 #include <QFile> | 38 #include <QFile> |
39 | 39 |
40 const int | 40 const sv_frame_t |
41 AudioGenerator::m_processingBlockSize = 1024; | 41 AudioGenerator::m_processingBlockSize = 1024; |
42 | 42 |
43 QString | 43 QString |
44 AudioGenerator::m_sampleDir = ""; | 44 AudioGenerator::m_sampleDir = ""; |
45 | 45 |
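The recurring change in this merge is that frame counts move from plain int to sv_frame_t. The typedef itself is not part of this file; as a rough sketch only, it can be read as a wide signed frame-count type along these lines:

```cpp
#include <cstdint>

// Illustrative assumption only: a 64-bit signed frame-count type.
// The real typedef lives in the codebase's base headers, not in this file.
typedef int64_t sv_frame_t;

// At 48 kHz, a day's worth of frames (4,147,200,000) overflows a 32-bit int
// (max 2,147,483,647) but fits comfortably in sv_frame_t.
const sv_frame_t longRecording = sv_frame_t(48000) * 60 * 60 * 24;
```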
226 | 226 |
227 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, | 227 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, |
228 m_sourceSampleRate, | 228 m_sourceSampleRate, |
229 m_processingBlockSize); | 229 m_processingBlockSize); |
230 | 230 |
231 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required | 231 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required |
232 | 232 |
233 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); | 233 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); |
234 | 234 |
235 float level = wantsQuieterClips(model) ? 0.5 : 1.0; | 235 double level = wantsQuieterClips(model) ? 0.5 : 1.0; |
236 if (!mixer->loadClipData(clipPath, clipF0, level)) { | 236 if (!mixer->loadClipData(clipPath, clipF0, level)) { |
237 delete mixer; | 237 delete mixer; |
238 return 0; | 238 return 0; |
239 } | 239 } |
240 | 240 |
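For the clipF0 line above: assuming Pitch::getFrequencyForPitch follows the usual equal-tempered MIDI convention (A4 = MIDI note 69 at the supplied concert-A frequency), a minimal re-derivation looks like this; the names below are illustrative, not the real Pitch API.

```cpp
#include <cmath>

// Sketch of the standard equal-tempered MIDI pitch-to-frequency mapping;
// the real Pitch::getFrequencyForPitch may differ in detail.
double frequencyForPitch(int midiPitch, double centsOffset, double concertA)
{
    double p = midiPitch + centsOffset / 100.0;           // fractional MIDI pitch
    return concertA * std::pow(2.0, (p - 69.0) / 12.0);   // A4 = MIDI 69 = concertA
}

// frequencyForPitch(60, 0, 440.0) ~= 261.626 Hz -- middle C, the clip F0 requested above
```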
316 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { | 316 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { |
317 if (i->second) i->second->setChannelCount(targetChannelCount); | 317 if (i->second) i->second->setChannelCount(targetChannelCount); |
318 } | 318 } |
319 } | 319 } |
320 | 320 |
321 int | 321 sv_frame_t |
322 AudioGenerator::getBlockSize() const | 322 AudioGenerator::getBlockSize() const |
323 { | 323 { |
324 return m_processingBlockSize; | 324 return m_processingBlockSize; |
325 } | 325 } |
326 | 326 |
340 | 340 |
341 m_soloModelSet.clear(); | 341 m_soloModelSet.clear(); |
342 m_soloing = false; | 342 m_soloing = false; |
343 } | 343 } |
344 | 344 |
345 int | 345 sv_frame_t |
346 AudioGenerator::mixModel(Model *model, int startFrame, int frameCount, | 346 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, |
347 float **buffer, int fadeIn, int fadeOut) | 347 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut) |
348 { | 348 { |
349 if (m_sourceSampleRate == 0) { | 349 if (m_sourceSampleRate == 0) { |
350 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; | 350 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; |
351 return frameCount; | 351 return frameCount; |
352 } | 352 } |
399 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; | 399 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; |
400 | 400 |
401 return frameCount; | 401 return frameCount; |
402 } | 402 } |
403 | 403 |
404 int | 404 sv_frame_t |
405 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, | 405 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, |
406 int startFrame, int frames, | 406 sv_frame_t startFrame, sv_frame_t frames, |
407 float **buffer, float gain, float pan, | 407 float **buffer, float gain, float pan, |
408 int fadeIn, int fadeOut) | 408 sv_frame_t fadeIn, sv_frame_t fadeOut) |
409 { | 409 { |
410 int maxFrames = frames + std::max(fadeIn, fadeOut); | 410 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut); |
411 | 411 |
412 int modelChannels = dtvm->getChannelCount(); | 412 int modelChannels = dtvm->getChannelCount(); |
413 | 413 |
414 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { | 414 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { |
415 | 415 |
426 | 426 |
427 m_channelBufCount = modelChannels; | 427 m_channelBufCount = modelChannels; |
428 m_channelBufSiz = maxFrames; | 428 m_channelBufSiz = maxFrames; |
429 } | 429 } |
430 | 430 |
431 int got = 0; | 431 sv_frame_t got = 0; |
432 | 432 |
433 if (startFrame >= fadeIn/2) { | 433 if (startFrame >= fadeIn/2) { |
434 got = dtvm->getData(0, modelChannels - 1, | 434 got = dtvm->getData(0, modelChannels - 1, |
435 startFrame - fadeIn/2, | 435 startFrame - fadeIn/2, |
436 frames + fadeOut/2 + fadeIn/2, | 436 frames + fadeOut/2 + fadeIn/2, |
437 m_channelBuffer); | 437 m_channelBuffer); |
438 } else { | 438 } else { |
439 int missing = fadeIn/2 - startFrame; | 439 sv_frame_t missing = fadeIn/2 - startFrame; |
440 | 440 |
441 for (int c = 0; c < modelChannels; ++c) { | 441 for (int c = 0; c < modelChannels; ++c) { |
442 m_channelBuffer[c] += missing; | 442 m_channelBuffer[c] += missing; |
443 } | 443 } |
444 | 444 |
468 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; | 468 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; |
469 | 469 |
470 float channelGain = gain; | 470 float channelGain = gain; |
471 if (pan != 0.0) { | 471 if (pan != 0.0) { |
472 if (c == 0) { | 472 if (c == 0) { |
473 if (pan > 0.0) channelGain *= 1.0 - pan; | 473 if (pan > 0.0) channelGain *= 1.0f - pan; |
474 } else { | 474 } else { |
475 if (pan < 0.0) channelGain *= pan + 1.0; | 475 if (pan < 0.0) channelGain *= pan + 1.0f; |
476 } | 476 } |
477 } | 477 } |
478 | 478 |
479 for (int i = 0; i < fadeIn/2; ++i) { | 479 for (sv_frame_t i = 0; i < fadeIn/2; ++i) { |
480 float *back = buffer[c]; | 480 float *back = buffer[c]; |
481 back -= fadeIn/2; | 481 back -= fadeIn/2; |
482 back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn; | 482 back[i] += |
 | 483 (channelGain * m_channelBuffer[sourceChannel][i] * float(i)) |
 | 484 / float(fadeIn); |
483 } | 485 } |
484 | 486 |
485 for (int i = 0; i < frames + fadeOut/2; ++i) { | 487 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) { |
486 float mult = channelGain; | 488 float mult = channelGain; |
487 if (i < fadeIn/2) { | 489 if (i < fadeIn/2) { |
488 mult = (mult * i) / fadeIn; | 490 mult = (mult * float(i)) / float(fadeIn); |
489 } | 491 } |
490 if (i > frames - fadeOut/2) { | 492 if (i > frames - fadeOut/2) { |
491 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut; | 493 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut); |
492 } | 494 } |
493 float val = m_channelBuffer[sourceChannel][i]; | 495 float val = m_channelBuffer[sourceChannel][i]; |
494 if (i >= got) val = 0.f; | 496 if (i >= got) val = 0.f; |
495 buffer[c][i] += mult * val; | 497 buffer[c][i] += mult * val; |
496 } | 498 } |
497 } | 499 } |
498 | 500 |
499 return got; | 501 return got; |
500 } | 502 } |
501 | 503 |
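The hunk above mainly adds explicit float casts to the fade arithmetic. Restated as a standalone sketch (simplified names, not the class itself), the per-sample gain in mixDenseTimeValueModel combines a linear pan law with linear fade-in and fade-out ramps:

```cpp
#include <cstdint>

// Simplified restatement of the per-sample gain: channel 0 is left,
// channel 1 is right, pan is in [-1, 1], fades are linear ramps.
float sampleGain(float gain, float pan, int channel,
                 int64_t i, int64_t frames, int64_t fadeIn, int64_t fadeOut)
{
    float channelGain = gain;
    if (pan != 0.f) {
        if (channel == 0) {
            if (pan > 0.f) channelGain *= 1.f - pan;   // left loses level when panned right
        } else {
            if (pan < 0.f) channelGain *= pan + 1.f;   // right loses level when panned left
        }
    }
    float mult = channelGain;
    if (fadeIn > 0 && i < fadeIn / 2) {
        mult = (mult * float(i)) / float(fadeIn);      // ramp up over the fade-in
    }
    if (fadeOut > 0 && i > frames - fadeOut / 2) {
        mult = (mult * float((frames + fadeOut / 2) - i)) / float(fadeOut);  // ramp down
    }
    return mult;
}
```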
502 int | 504 sv_frame_t |
503 AudioGenerator::mixClipModel(Model *model, | 505 AudioGenerator::mixClipModel(Model *model, |
504 int startFrame, int frames, | 506 sv_frame_t startFrame, sv_frame_t frames, |
505 float **buffer, float gain, float pan) | 507 float **buffer, float gain, float pan) |
506 { | 508 { |
507 ClipMixer *clipMixer = m_clipMixerMap[model]; | 509 ClipMixer *clipMixer = m_clipMixerMap[model]; |
508 if (!clipMixer) return 0; | 510 if (!clipMixer) return 0; |
509 | 511 |
510 int blocks = frames / m_processingBlockSize; | 512 int blocks = int(frames / m_processingBlockSize); |
511 | 513 |
512 //!!! todo: the below -- it matters | 514 //!!! todo: the below -- it matters |
513 | 515 |
514 //!!! hang on -- the fact that the audio callback play source's | 516 //!!! hang on -- the fact that the audio callback play source's |
515 //buffer is a multiple of the plugin's buffer size doesn't mean | 517 //buffer is a multiple of the plugin's buffer size doesn't mean |
519 //always have a multiple of the plugin buffer size? I guess this | 521 //always have a multiple of the plugin buffer size? I guess this |
520 //class has to be queryable for the plugin buffer size & the | 522 //class has to be queryable for the plugin buffer size & the |
521 //callback play source has to use that as a multiple for all the | 523 //callback play source has to use that as a multiple for all the |
522 //calls to mixModel | 524 //calls to mixModel |
523 | 525 |
524 int got = blocks * m_processingBlockSize; | 526 sv_frame_t got = blocks * m_processingBlockSize; |
525 | 527 |
526 #ifdef DEBUG_AUDIO_GENERATOR | 528 #ifdef DEBUG_AUDIO_GENERATOR |
527 cout << "mixModel [clip]: frames " << frames | 529 cout << "mixModel [clip]: frames " << frames |
528 << ", blocks " << blocks << endl; | 530 << ", blocks " << blocks << endl; |
529 #endif | 531 #endif |
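The todo above is about exactly this block arithmetic: only whole processing blocks are mixed per call, so any remainder is silently dropped unless the caller always requests a multiple of the block size. A worked example with illustrative values:

```cpp
#include <cstdint>

// Worked example of the block quantisation above (values illustrative):
int64_t frames    = 1500, blockSize = 1024;
int     blocks    = int(frames / blockSize);   // 1
int64_t got       = blocks * blockSize;        // 1024 -- the remaining 476
                                               // requested frames are not mixed
```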
535 | 537 |
536 float **bufferIndexes = new float *[m_targetChannelCount]; | 538 float **bufferIndexes = new float *[m_targetChannelCount]; |
537 | 539 |
538 for (int i = 0; i < blocks; ++i) { | 540 for (int i = 0; i < blocks; ++i) { |
539 | 541 |
540 int reqStart = startFrame + i * m_processingBlockSize; | 542 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
541 | 543 |
542 NoteList notes; | 544 NoteList notes; |
543 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); | 545 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); |
544 if (exportable) { | 546 if (exportable) { |
545 notes = exportable->getNotesWithin(reqStart, | 547 notes = exportable->getNotesWithin(reqStart, |
550 std::vector<ClipMixer::NoteEnd> ends; | 552 std::vector<ClipMixer::NoteEnd> ends; |
551 | 553 |
552 for (NoteList::const_iterator ni = notes.begin(); | 554 for (NoteList::const_iterator ni = notes.begin(); |
553 ni != notes.end(); ++ni) { | 555 ni != notes.end(); ++ni) { |
554 | 556 |
555 int noteFrame = ni->start; | 557 sv_frame_t noteFrame = ni->start; |
556 | 558 |
557 if (noteFrame < reqStart || | 559 if (noteFrame < reqStart || |
558 noteFrame >= reqStart + m_processingBlockSize) continue; | 560 noteFrame >= reqStart + m_processingBlockSize) continue; |
559 | 561 |
560 while (noteOffs.begin() != noteOffs.end() && | 562 while (noteOffs.begin() != noteOffs.end() && |
561 noteOffs.begin()->frame <= noteFrame) { | 563 noteOffs.begin()->frame <= noteFrame) { |
562 | 564 |
563 int eventFrame = noteOffs.begin()->frame; | 565 sv_frame_t eventFrame = noteOffs.begin()->frame; |
564 if (eventFrame < reqStart) eventFrame = reqStart; | 566 if (eventFrame < reqStart) eventFrame = reqStart; |
565 | 567 |
566 off.frameOffset = eventFrame - reqStart; | 568 off.frameOffset = eventFrame - reqStart; |
567 off.frequency = noteOffs.begin()->frequency; | 569 off.frequency = noteOffs.begin()->frequency; |
568 | 570 |
574 noteOffs.erase(noteOffs.begin()); | 576 noteOffs.erase(noteOffs.begin()); |
575 } | 577 } |
576 | 578 |
577 on.frameOffset = noteFrame - reqStart; | 579 on.frameOffset = noteFrame - reqStart; |
578 on.frequency = ni->getFrequency(); | 580 on.frequency = ni->getFrequency(); |
579 on.level = float(ni->velocity) / 127.0; | 581 on.level = float(ni->velocity) / 127.0f; |
580 on.pan = pan; | 582 on.pan = pan; |
581 | 583 |
582 #ifdef DEBUG_AUDIO_GENERATOR | 584 #ifdef DEBUG_AUDIO_GENERATOR |
583 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; | 585 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; |
584 #endif | 586 #endif |
589 } | 591 } |
590 | 592 |
591 while (noteOffs.begin() != noteOffs.end() && | 593 while (noteOffs.begin() != noteOffs.end() && |
592 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { | 594 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { |
593 | 595 |
594 int eventFrame = noteOffs.begin()->frame; | 596 sv_frame_t eventFrame = noteOffs.begin()->frame; |
595 if (eventFrame < reqStart) eventFrame = reqStart; | 597 if (eventFrame < reqStart) eventFrame = reqStart; |
596 | 598 |
597 off.frameOffset = eventFrame - reqStart; | 599 off.frameOffset = eventFrame - reqStart; |
598 off.frequency = noteOffs.begin()->frequency; | 600 off.frequency = noteOffs.begin()->frequency; |
599 | 601 |
615 delete[] bufferIndexes; | 617 delete[] bufferIndexes; |
616 | 618 |
617 return got; | 619 return got; |
618 } | 620 } |
619 | 621 |
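In the note loop above, each exportable note is rebased to an offset within the current processing block and its MIDI velocity is normalised to a 0-1 level. A hedged sketch of that conversion, with a hypothetical NoteOn struct standing in for ClipMixer::NoteStart:

```cpp
#include <cstdint>

// Hypothetical stand-in for ClipMixer::NoteStart, for illustration only.
struct NoteOn {
    int64_t frameOffset;   // note-on position within the current block
    float frequency;       // frequency in Hz
    float level;           // playback level, 0..1
    float pan;             // -1 (left) .. +1 (right)
};

// Rebase a note starting at noteFrame into the block beginning at reqStart,
// normalising MIDI velocity (0-127) to a 0-1 level, as the loop above does.
NoteOn makeNoteOn(int64_t noteFrame, int64_t reqStart,
                  float frequency, int velocity, float pan)
{
    NoteOn on;
    on.frameOffset = noteFrame - reqStart;
    on.frequency = frequency;
    on.level = float(velocity) / 127.0f;
    on.pan = pan;
    return on;
}
```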
620 int | 622 sv_frame_t |
621 AudioGenerator::mixContinuousSynthModel(Model *model, | 623 AudioGenerator::mixContinuousSynthModel(Model *model, |
622 int startFrame, | 624 sv_frame_t startFrame, |
623 int frames, | 625 sv_frame_t frames, |
624 float **buffer, | 626 float **buffer, |
625 float gain, | 627 float gain, |
626 float pan) | 628 float pan) |
627 { | 629 { |
628 ContinuousSynth *synth = m_continuousSynthMap[model]; | 630 ContinuousSynth *synth = m_continuousSynthMap[model]; |
630 | 632 |
631 // only type we support here at the moment | 633 // only type we support here at the moment |
632 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); | 634 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); |
633 if (stvm->getScaleUnits() != "Hz") return 0; | 635 if (stvm->getScaleUnits() != "Hz") return 0; |
634 | 636 |
635 int blocks = frames / m_processingBlockSize; | 637 int blocks = int(frames / m_processingBlockSize); |
636 | 638 |
637 //!!! todo: see comment in mixClipModel | 639 //!!! todo: see comment in mixClipModel |
638 | 640 |
639 int got = blocks * m_processingBlockSize; | 641 sv_frame_t got = blocks * m_processingBlockSize; |
640 | 642 |
641 #ifdef DEBUG_AUDIO_GENERATOR | 643 #ifdef DEBUG_AUDIO_GENERATOR |
642 cout << "mixModel [synth]: frames " << frames | 644 cout << "mixModel [synth]: frames " << frames |
643 << ", blocks " << blocks << endl; | 645 << ", blocks " << blocks << endl; |
644 #endif | 646 #endif |
645 | 647 |
646 float **bufferIndexes = new float *[m_targetChannelCount]; | 648 float **bufferIndexes = new float *[m_targetChannelCount]; |
647 | 649 |
648 for (int i = 0; i < blocks; ++i) { | 650 for (int i = 0; i < blocks; ++i) { |
649 | 651 |
650 int reqStart = startFrame + i * m_processingBlockSize; | 652 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
651 | 653 |
652 for (int c = 0; c < m_targetChannelCount; ++c) { | 654 for (int c = 0; c < m_targetChannelCount; ++c) { |
653 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; | 655 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; |
654 } | 656 } |
655 | 657 |