comparison audioio/AudioGenerator.cpp @ 436:72c662fe7ea3 cxx11
Further dedicated-types fixes
author | Chris Cannam |
---|---|
date | Tue, 10 Mar 2015 17:02:52 +0000 |
parents | f747be6743ab |
children | aa6fb3516e28 |
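For orientation before the hunks: the changeset replaces plain `int` (and some `float`) uses with the project's dedicated types such as `sv_frame_t`. A minimal sketch of the kind of aliases involved, assuming the usual definitions from the Sonic Visualiser support library (the real declarations live in svcore, not in this file, so the exact widths shown are an assumption):

```cpp
// Sketch only: assumed stand-ins for the dedicated types this diff switches to.
// The authoritative declarations live in the svcore support library, not here.
#include <cstdint>

typedef int64_t sv_frame_t;       // frame counts and offsets (replacing plain int)
typedef double  sv_samplerate_t;  // sample rates

// Hypothetical helper, not part of the changeset: illustrates the point of the
// wider type -- frame arithmetic stays in sv_frame_t, so long-session frame
// offsets cannot overflow a 32-bit int.
inline sv_frame_t blockStartFrame(sv_frame_t startFrame, int blockIndex,
                                  sv_frame_t blockSize)
{
    return startFrame + blockIndex * blockSize;
}
```

The same reasoning applies to the float-to-double changes visible below (e.g. clipF0, level): values are computed in double or with explicit float() casts, and narrowed only where the receiving API requires float.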
435:618d5816b04d | 436:72c662fe7ea3 |
---|---|
35 #include <cmath> | 35 #include <cmath> |
36 | 36 |
37 #include <QDir> | 37 #include <QDir> |
38 #include <QFile> | 38 #include <QFile> |
39 | 39 |
40 const int | 40 const sv_frame_t |
41 AudioGenerator::m_processingBlockSize = 1024; | 41 AudioGenerator::m_processingBlockSize = 1024; |
42 | 42 |
43 QString | 43 QString |
44 AudioGenerator::m_sampleDir = ""; | 44 AudioGenerator::m_sampleDir = ""; |
45 | 45 |
218 | 218 |
219 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, | 219 ClipMixer *mixer = new ClipMixer(m_targetChannelCount, |
220 m_sourceSampleRate, | 220 m_sourceSampleRate, |
221 m_processingBlockSize); | 221 m_processingBlockSize); |
222 | 222 |
223 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required | 223 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required |
224 | 224 |
225 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); | 225 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); |
226 | 226 |
227 float level = wantsQuieterClips(model) ? 0.5 : 1.0; | 227 double level = wantsQuieterClips(model) ? 0.5 : 1.0; |
228 if (!mixer->loadClipData(clipPath, clipF0, level)) { | 228 if (!mixer->loadClipData(clipPath, clipF0, level)) { |
229 delete mixer; | 229 delete mixer; |
230 return 0; | 230 return 0; |
231 } | 231 } |
232 | 232 |
308 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { | 308 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { |
309 if (i->second) i->second->setChannelCount(targetChannelCount); | 309 if (i->second) i->second->setChannelCount(targetChannelCount); |
310 } | 310 } |
311 } | 311 } |
312 | 312 |
313 int | 313 sv_frame_t |
314 AudioGenerator::getBlockSize() const | 314 AudioGenerator::getBlockSize() const |
315 { | 315 { |
316 return m_processingBlockSize; | 316 return m_processingBlockSize; |
317 } | 317 } |
318 | 318 |
332 | 332 |
333 m_soloModelSet.clear(); | 333 m_soloModelSet.clear(); |
334 m_soloing = false; | 334 m_soloing = false; |
335 } | 335 } |
336 | 336 |
337 int | 337 sv_frame_t |
338 AudioGenerator::mixModel(Model *model, int startFrame, int frameCount, | 338 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, |
339 float **buffer, int fadeIn, int fadeOut) | 339 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut) |
340 { | 340 { |
341 if (m_sourceSampleRate == 0) { | 341 if (m_sourceSampleRate == 0) { |
342 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; | 342 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; |
343 return frameCount; | 343 return frameCount; |
344 } | 344 } |
391 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; | 391 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; |
392 | 392 |
393 return frameCount; | 393 return frameCount; |
394 } | 394 } |
395 | 395 |
396 int | 396 sv_frame_t |
397 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, | 397 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, |
398 int startFrame, int frames, | 398 sv_frame_t startFrame, sv_frame_t frames, |
399 float **buffer, float gain, float pan, | 399 float **buffer, float gain, float pan, |
400 int fadeIn, int fadeOut) | 400 sv_frame_t fadeIn, sv_frame_t fadeOut) |
401 { | 401 { |
402 int maxFrames = frames + std::max(fadeIn, fadeOut); | 402 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut); |
403 | 403 |
404 int modelChannels = dtvm->getChannelCount(); | 404 int modelChannels = dtvm->getChannelCount(); |
405 | 405 |
406 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { | 406 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { |
407 | 407 |
418 | 418 |
419 m_channelBufCount = modelChannels; | 419 m_channelBufCount = modelChannels; |
420 m_channelBufSiz = maxFrames; | 420 m_channelBufSiz = maxFrames; |
421 } | 421 } |
422 | 422 |
423 int got = 0; | 423 sv_frame_t got = 0; |
424 | 424 |
425 if (startFrame >= fadeIn/2) { | 425 if (startFrame >= fadeIn/2) { |
426 got = dtvm->getData(0, modelChannels - 1, | 426 got = dtvm->getData(0, modelChannels - 1, |
427 startFrame - fadeIn/2, | 427 startFrame - fadeIn/2, |
428 frames + fadeOut/2 + fadeIn/2, | 428 frames + fadeOut/2 + fadeIn/2, |
429 m_channelBuffer); | 429 m_channelBuffer); |
430 } else { | 430 } else { |
431 int missing = fadeIn/2 - startFrame; | 431 sv_frame_t missing = fadeIn/2 - startFrame; |
432 | 432 |
433 for (int c = 0; c < modelChannels; ++c) { | 433 for (int c = 0; c < modelChannels; ++c) { |
434 m_channelBuffer[c] += missing; | 434 m_channelBuffer[c] += missing; |
435 } | 435 } |
436 | 436 |
460 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; | 460 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; |
461 | 461 |
462 float channelGain = gain; | 462 float channelGain = gain; |
463 if (pan != 0.0) { | 463 if (pan != 0.0) { |
464 if (c == 0) { | 464 if (c == 0) { |
465 if (pan > 0.0) channelGain *= 1.0 - pan; | 465 if (pan > 0.0) channelGain *= 1.0f - pan; |
466 } else { | 466 } else { |
467 if (pan < 0.0) channelGain *= pan + 1.0; | 467 if (pan < 0.0) channelGain *= pan + 1.0f; |
468 } | 468 } |
469 } | 469 } |
470 | 470 |
471 for (int i = 0; i < fadeIn/2; ++i) { | 471 for (sv_frame_t i = 0; i < fadeIn/2; ++i) { |
472 float *back = buffer[c]; | 472 float *back = buffer[c]; |
473 back -= fadeIn/2; | 473 back -= fadeIn/2; |
474 back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn; | 474 back[i] += |
| 475 (channelGain * m_channelBuffer[sourceChannel][i] * float(i)) |
| 476 / float(fadeIn); |
475 } | 477 } |
476 | 478 |
477 for (int i = 0; i < frames + fadeOut/2; ++i) { | 479 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) { |
478 float mult = channelGain; | 480 float mult = channelGain; |
479 if (i < fadeIn/2) { | 481 if (i < fadeIn/2) { |
480 mult = (mult * i) / fadeIn; | 482 mult = (mult * float(i)) / float(fadeIn); |
481 } | 483 } |
482 if (i > frames - fadeOut/2) { | 484 if (i > frames - fadeOut/2) { |
483 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut; | 485 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut); |
484 } | 486 } |
485 float val = m_channelBuffer[sourceChannel][i]; | 487 float val = m_channelBuffer[sourceChannel][i]; |
486 if (i >= got) val = 0.f; | 488 if (i >= got) val = 0.f; |
487 buffer[c][i] += mult * val; | 489 buffer[c][i] += mult * val; |
488 } | 490 } |
489 } | 491 } |
490 | 492 |
491 return got; | 493 return got; |
492 } | 494 } |
493 | 495 |
494 int | 496 sv_frame_t |
495 AudioGenerator::mixClipModel(Model *model, | 497 AudioGenerator::mixClipModel(Model *model, |
496 int startFrame, int frames, | 498 sv_frame_t startFrame, sv_frame_t frames, |
497 float **buffer, float gain, float pan) | 499 float **buffer, float gain, float pan) |
498 { | 500 { |
499 ClipMixer *clipMixer = m_clipMixerMap[model]; | 501 ClipMixer *clipMixer = m_clipMixerMap[model]; |
500 if (!clipMixer) return 0; | 502 if (!clipMixer) return 0; |
501 | 503 |
502 int blocks = frames / m_processingBlockSize; | 504 int blocks = int(frames / m_processingBlockSize); |
503 | 505 |
504 //!!! todo: the below -- it matters | 506 //!!! todo: the below -- it matters |
505 | 507 |
506 //!!! hang on -- the fact that the audio callback play source's | 508 //!!! hang on -- the fact that the audio callback play source's |
507 //buffer is a multiple of the plugin's buffer size doesn't mean | 509 //buffer is a multiple of the plugin's buffer size doesn't mean |
511 //always have a multiple of the plugin buffer size? I guess this | 513 //always have a multiple of the plugin buffer size? I guess this |
512 //class has to be queryable for the plugin buffer size & the | 514 //class has to be queryable for the plugin buffer size & the |
513 //callback play source has to use that as a multiple for all the | 515 //callback play source has to use that as a multiple for all the |
514 //calls to mixModel | 516 //calls to mixModel |
515 | 517 |
516 int got = blocks * m_processingBlockSize; | 518 sv_frame_t got = blocks * m_processingBlockSize; |
517 | 519 |
518 #ifdef DEBUG_AUDIO_GENERATOR | 520 #ifdef DEBUG_AUDIO_GENERATOR |
519 cout << "mixModel [clip]: frames " << frames | 521 cout << "mixModel [clip]: frames " << frames |
520 << ", blocks " << blocks << endl; | 522 << ", blocks " << blocks << endl; |
521 #endif | 523 #endif |
527 | 529 |
528 float **bufferIndexes = new float *[m_targetChannelCount]; | 530 float **bufferIndexes = new float *[m_targetChannelCount]; |
529 | 531 |
530 for (int i = 0; i < blocks; ++i) { | 532 for (int i = 0; i < blocks; ++i) { |
531 | 533 |
532 int reqStart = startFrame + i * m_processingBlockSize; | 534 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
533 | 535 |
534 NoteList notes; | 536 NoteList notes; |
535 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); | 537 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); |
536 if (exportable) { | 538 if (exportable) { |
537 notes = exportable->getNotesWithin(reqStart, | 539 notes = exportable->getNotesWithin(reqStart, |
542 std::vector<ClipMixer::NoteEnd> ends; | 544 std::vector<ClipMixer::NoteEnd> ends; |
543 | 545 |
544 for (NoteList::const_iterator ni = notes.begin(); | 546 for (NoteList::const_iterator ni = notes.begin(); |
545 ni != notes.end(); ++ni) { | 547 ni != notes.end(); ++ni) { |
546 | 548 |
547 int noteFrame = ni->start; | 549 sv_frame_t noteFrame = ni->start; |
548 | 550 |
549 if (noteFrame < reqStart || | 551 if (noteFrame < reqStart || |
550 noteFrame >= reqStart + m_processingBlockSize) continue; | 552 noteFrame >= reqStart + m_processingBlockSize) continue; |
551 | 553 |
552 while (noteOffs.begin() != noteOffs.end() && | 554 while (noteOffs.begin() != noteOffs.end() && |
553 noteOffs.begin()->frame <= noteFrame) { | 555 noteOffs.begin()->frame <= noteFrame) { |
554 | 556 |
555 int eventFrame = noteOffs.begin()->frame; | 557 sv_frame_t eventFrame = noteOffs.begin()->frame; |
556 if (eventFrame < reqStart) eventFrame = reqStart; | 558 if (eventFrame < reqStart) eventFrame = reqStart; |
557 | 559 |
558 off.frameOffset = eventFrame - reqStart; | 560 off.frameOffset = eventFrame - reqStart; |
559 off.frequency = noteOffs.begin()->frequency; | 561 off.frequency = noteOffs.begin()->frequency; |
560 | 562 |
566 noteOffs.erase(noteOffs.begin()); | 568 noteOffs.erase(noteOffs.begin()); |
567 } | 569 } |
568 | 570 |
569 on.frameOffset = noteFrame - reqStart; | 571 on.frameOffset = noteFrame - reqStart; |
570 on.frequency = ni->getFrequency(); | 572 on.frequency = ni->getFrequency(); |
571 on.level = float(ni->velocity) / 127.0; | 573 on.level = float(ni->velocity) / 127.0f; |
572 on.pan = pan; | 574 on.pan = pan; |
573 | 575 |
574 #ifdef DEBUG_AUDIO_GENERATOR | 576 #ifdef DEBUG_AUDIO_GENERATOR |
575 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; | 577 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; |
576 #endif | 578 #endif |
581 } | 583 } |
582 | 584 |
583 while (noteOffs.begin() != noteOffs.end() && | 585 while (noteOffs.begin() != noteOffs.end() && |
584 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { | 586 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { |
585 | 587 |
586 int eventFrame = noteOffs.begin()->frame; | 588 sv_frame_t eventFrame = noteOffs.begin()->frame; |
587 if (eventFrame < reqStart) eventFrame = reqStart; | 589 if (eventFrame < reqStart) eventFrame = reqStart; |
588 | 590 |
589 off.frameOffset = eventFrame - reqStart; | 591 off.frameOffset = eventFrame - reqStart; |
590 off.frequency = noteOffs.begin()->frequency; | 592 off.frequency = noteOffs.begin()->frequency; |
591 | 593 |
607 delete[] bufferIndexes; | 609 delete[] bufferIndexes; |
608 | 610 |
609 return got; | 611 return got; |
610 } | 612 } |
611 | 613 |
612 int | 614 sv_frame_t |
613 AudioGenerator::mixContinuousSynthModel(Model *model, | 615 AudioGenerator::mixContinuousSynthModel(Model *model, |
614 int startFrame, | 616 sv_frame_t startFrame, |
615 int frames, | 617 sv_frame_t frames, |
616 float **buffer, | 618 float **buffer, |
617 float gain, | 619 float gain, |
618 float pan) | 620 float pan) |
619 { | 621 { |
620 ContinuousSynth *synth = m_continuousSynthMap[model]; | 622 ContinuousSynth *synth = m_continuousSynthMap[model]; |
622 | 624 |
623 // only type we support here at the moment | 625 // only type we support here at the moment |
624 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); | 626 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); |
625 if (stvm->getScaleUnits() != "Hz") return 0; | 627 if (stvm->getScaleUnits() != "Hz") return 0; |
626 | 628 |
627 int blocks = frames / m_processingBlockSize; | 629 int blocks = int(frames / m_processingBlockSize); |
628 | 630 |
629 //!!! todo: see comment in mixClipModel | 631 //!!! todo: see comment in mixClipModel |
630 | 632 |
631 int got = blocks * m_processingBlockSize; | 633 sv_frame_t got = blocks * m_processingBlockSize; |
632 | 634 |
633 #ifdef DEBUG_AUDIO_GENERATOR | 635 #ifdef DEBUG_AUDIO_GENERATOR |
634 cout << "mixModel [synth]: frames " << frames | 636 cout << "mixModel [synth]: frames " << frames |
635 << ", blocks " << blocks << endl; | 637 << ", blocks " << blocks << endl; |
636 #endif | 638 #endif |
637 | 639 |
638 float **bufferIndexes = new float *[m_targetChannelCount]; | 640 float **bufferIndexes = new float *[m_targetChannelCount]; |
639 | 641 |
640 for (int i = 0; i < blocks; ++i) { | 642 for (int i = 0; i < blocks; ++i) { |
641 | 643 |
642 int reqStart = startFrame + i * m_processingBlockSize; | 644 sv_frame_t reqStart = startFrame + i * m_processingBlockSize; |
643 | 645 |
644 for (int c = 0; c < m_targetChannelCount; ++c) { | 646 for (int c = 0; c < m_targetChannelCount; ++c) { |
645 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; | 647 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; |
646 } | 648 } |
647 | 649 |