comparison audio/AudioGenerator.cpp @ 595:b23bebfdfaba

Untabify
author Chris Cannam
date Thu, 01 Mar 2018 18:02:22 +0000
parents 821aba42c1bb
children c99892f0c5c3
comparing 594:72b4870f0e6b (previous revision) with 595:b23bebfdfaba (this revision)
114  bool
115  AudioGenerator::addModel(Model *model)
116  {
117      if (m_sourceSampleRate == 0) {
118
119          m_sourceSampleRate = model->getSampleRate();
120
121      } else {
122
123          DenseTimeValueModel *dtvm =
124              dynamic_cast<DenseTimeValueModel *>(model);
125
126          if (dtvm) {
127              m_sourceSampleRate = model->getSampleRate();
128              return true;
129          }
130      }
131
132      const Playable *playable = model;
133      if (!playable || !playable->canPlay()) return 0;
134
135      PlayParameters *parameters =
136          PlayParameterRepository::getInstance()->getPlayParameters(playable);
137
138      bool willPlay = !parameters->isPlayMuted();
139
140      if (usesClipMixer(model)) {
141          ClipMixer *mixer = makeClipMixerFor(model);
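The addModel hunk above looks the model up in the PlayParameterRepository and derives willPlay from its mute flag before deciding how to mix it. A minimal sketch of that playable/mute check, reusing only calls visible in the listing (the standalone helper and its name isAudible are illustrative, not part of AudioGenerator.cpp, and it assumes the headers the file already includes):

static bool isAudible(const Playable *playable)
{
    // Mirrors the gating above: unplayable or muted models are skipped.
    if (!playable || !playable->canPlay()) return false;
    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    return parameters && !parameters->isPlayMuted();
}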
215
216      const Playable *playable = model;
217      if (!playable || !playable->canPlay()) return 0;
218
219      PlayParameters *parameters =
220          PlayParameterRepository::getInstance()->getPlayParameters(playable);
221      if (parameters) {
222          clipId = parameters->getPlayClipId();
223      }
224
225  #ifdef DEBUG_AUDIO_GENERATOR
272
273  void
274  AudioGenerator::removeModel(Model *model)
275  {
276      SparseOneDimensionalModel *sodm =
277          dynamic_cast<SparseOneDimensionalModel *>(model);
278      if (!sodm) return; // nothing to do
279
280      QMutexLocker locker(&m_mutex);
281
282      if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
291  {
292      QMutexLocker locker(&m_mutex);
293
294      while (!m_clipMixerMap.empty()) {
295          ClipMixer *mixer = m_clipMixerMap.begin()->second;
296          m_clipMixerMap.erase(m_clipMixerMap.begin());
297          delete mixer;
298      }
299  }
300
301  void
302  AudioGenerator::reset()
306  #ifdef DEBUG_AUDIO_GENERATOR
307      cerr << "AudioGenerator::reset()" << endl;
308  #endif
309
310      for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
311          if (i->second) {
312              i->second->reset();
313          }
314      }
315
316      m_noteOffs.clear();
317  }
318
325
326      QMutexLocker locker(&m_mutex);
327      m_targetChannelCount = targetChannelCount;
328
329      for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
330          if (i->second) i->second->setChannelCount(targetChannelCount);
331      }
332  }
333
334  sv_frame_t
335  AudioGenerator::getBlockSize() const
355      m_soloing = false;
356  }
357
358  sv_frame_t
359  AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
360                           float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
361  {
362      if (m_sourceSampleRate == 0) {
363          cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
364          return frameCount;
365      }
366
367      QMutexLocker locker(&m_mutex);
368
369      Playable *playable = model;
370      if (!playable || !playable->canPlay()) return frameCount;
371
372      PlayParameters *parameters =
373          PlayParameterRepository::getInstance()->getPlayParameters(playable);
374      if (!parameters) return frameCount;
375
376      bool playing = !parameters->isPlayMuted();
377      if (!playing) {
378  #ifdef DEBUG_AUDIO_GENERATOR
393      float gain = parameters->getPlayGain();
394      float pan = parameters->getPlayPan();
395
396      DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
397      if (dtvm) {
398          return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
399                                        buffer, gain, pan, fadeIn, fadeOut);
400      }
401
402      if (usesClipMixer(model)) {
403          return mixClipModel(model, startFrame, frameCount,
404                              buffer, gain, pan);
414      return frameCount;
415  }
416
417  sv_frame_t
418  AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
419                                         sv_frame_t startFrame, sv_frame_t frames,
420                                         float **buffer, float gain, float pan,
421                                         sv_frame_t fadeIn, sv_frame_t fadeOut)
422  {
423      sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
424
425      int modelChannels = dtvm->getChannelCount();
426
428
429          for (int c = 0; c < m_channelBufCount; ++c) {
430              delete[] m_channelBuffer[c];
431          }
432
433          delete[] m_channelBuffer;
434          m_channelBuffer = new float *[modelChannels];
435
436          for (int c = 0; c < modelChannels; ++c) {
437              m_channelBuffer[c] = new float[maxFrames];
438          }
439
440          m_channelBufCount = modelChannels;
441          m_channelBufSiz = maxFrames;
442      }
443
444      sv_frame_t got = 0;
445
446      if (startFrame >= fadeIn/2) {
471          for (int c = 0; c < modelChannels; ++c) {
472              copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
473          }
474
475          got = data[0].size() + missing;
476      }
477
478      for (int c = 0; c < m_targetChannelCount; ++c) {
479
480          int sourceChannel = (c % modelChannels);
481
482          // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
483
484          float channelGain = gain;
485          if (pan != 0.0) {
486              if (c == 0) {
487                  if (pan > 0.0) channelGain *= 1.0f - pan;
488              } else {
489                  if (pan < 0.0) channelGain *= pan + 1.0f;
490              }
491          }
492
493          for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
494              float *back = buffer[c];
495              back -= fadeIn/2;
496              back[i] +=
497                  (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
498                  / float(fadeIn);
499          }
500
501          for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
502              float mult = channelGain;
503              if (i < fadeIn/2) {
504                  mult = (mult * float(i)) / float(fadeIn);
505              }
506              if (i > frames - fadeOut/2) {
507                  mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
508              }
509              float val = m_channelBuffer[sourceChannel][i];
510              if (i >= got) val = 0.f;
511              buffer[c][i] += mult * val;
512          }
513      }
514
515      return got;
516  }
517
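In mixDenseTimeValueModel above, the short loop over fadeIn/2 writes the leading half of the fade-in into the frames just before buffer[c] (through the rewound back pointer), and the main loop scales each sample by a gain that combines a linear pan law with linear fade ramps. A compilable sketch of that per-sample gain, not part of AudioGenerator.cpp; sv_frame_t is assumed here to stand for the project's 64-bit frame type:

#include <cstdint>

typedef int64_t sv_frame_t;   // assumption: stand-in for the project's frame type

// Gain applied to sample i of a channel in the main loop above: channel 0 is
// attenuated when panning right (pan > 0), other channels when panning left
// (pan < 0), then linear fade-in/fade-out ramps are applied at the edges.
static float sampleGain(int channel, float gain, float pan,
                        sv_frame_t i, sv_frame_t frames,
                        sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    float g = gain;
    if (pan != 0.f) {
        if (channel == 0) {
            if (pan > 0.f) g *= 1.0f - pan;
        } else {
            if (pan < 0.f) g *= pan + 1.0f;
        }
    }
    if (i < fadeIn/2) {
        g = (g * float(i)) / float(fadeIn);
    }
    if (i > frames - fadeOut/2) {
        g = (g * float((frames + fadeOut/2) - i)) / float(fadeOut);
    }
    return g;
}

With a helper like this, the body of the main loop reduces to buffer[c][i] += sampleGain(c, gain, pan, i, frames, fadeIn, fadeOut) * val.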
552
553      float **bufferIndexes = new float *[m_targetChannelCount];
554
555      for (int i = 0; i < blocks; ++i) {
556
557          sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
558
559          NoteList notes;
560          NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
561          if (exportable) {
562              notes = exportable->getNotesWithin(reqStart,
564          }
565
566          std::vector<ClipMixer::NoteStart> starts;
567          std::vector<ClipMixer::NoteEnd> ends;
568
569          for (NoteList::const_iterator ni = notes.begin();
570               ni != notes.end(); ++ni) {
571
572              sv_frame_t noteFrame = ni->start;
573
574              if (noteFrame < reqStart ||
575                  noteFrame >= reqStart + m_processingBlockSize) continue;
576
577              while (noteOffs.begin() != noteOffs.end() &&
578                     noteOffs.begin()->frame <= noteFrame) {
579
580                  sv_frame_t eventFrame = noteOffs.begin()->frame;
581                  if (eventFrame < reqStart) eventFrame = reqStart;
582
583                  off.frameOffset = eventFrame - reqStart;
584                  off.frequency = noteOffs.begin()->frequency;
585
586  #ifdef DEBUG_AUDIO_GENERATOR
587                  cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
588  #endif
589
590                  ends.push_back(off);
591                  noteOffs.erase(noteOffs.begin());
592              }
593
594              on.frameOffset = noteFrame - reqStart;
595              on.frequency = ni->getFrequency();
596              on.level = float(ni->velocity) / 127.0f;
597              on.pan = pan;
598
599  #ifdef DEBUG_AUDIO_GENERATOR
600              cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
601  #endif
602
603              starts.push_back(on);
604              noteOffs.insert
605                  (NoteOff(on.frequency, noteFrame + ni->duration));
606          }
607
608          while (noteOffs.begin() != noteOffs.end() &&
609                 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
610
611              sv_frame_t eventFrame = noteOffs.begin()->frame;
612              if (eventFrame < reqStart) eventFrame = reqStart;
613
614              off.frameOffset = eventFrame - reqStart;
618              cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
619  #endif
620
621              ends.push_back(off);
622              noteOffs.erase(noteOffs.begin());
623          }
624
625          for (int c = 0; c < m_targetChannelCount; ++c) {
626              bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
627          }
628
629          clipMixer->mix(bufferIndexes, gain, starts, ends);
630      }
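The two while loops above drain noteOffs in frame order: note-offs falling due at or before the next note-on are emitted first, and anything still due by the end of the block is emitted before the mixer is called. A self-contained sketch of that bookkeeping, assuming noteOffs behaves like a multiset ordered by frame; PendingOff, OffEvent, ByFrame and flushNoteOffs are illustrative stand-ins for NoteOff and ClipMixer::NoteEnd, not the actual types:

#include <cstdint>
#include <set>
#include <vector>

struct PendingOff { float frequency; int64_t frame; };        // stand-in for NoteOff
struct OffEvent   { int64_t frameOffset; float frequency; };  // stand-in for ClipMixer::NoteEnd

struct ByFrame {
    bool operator()(const PendingOff &a, const PendingOff &b) const {
        return a.frame < b.frame;
    }
};

// Emit every pending note-off due at or before "limit", clamping events that
// began before the current block so their offset falls inside it. This mirrors
// both drain loops in the clip-mixing code above.
static void flushNoteOffs(std::multiset<PendingOff, ByFrame> &pending,
                          int64_t reqStart, int64_t limit,
                          std::vector<OffEvent> &ends)
{
    while (!pending.empty() && pending.begin()->frame <= limit) {
        int64_t eventFrame = pending.begin()->frame;
        if (eventFrame < reqStart) eventFrame = reqStart;     // clamp to block start
        ends.push_back({ eventFrame - reqStart, pending.begin()->frequency });
        pending.erase(pending.begin());
    }
}

In the listing, limit corresponds to noteFrame for the first drain and to reqStart + m_processingBlockSize for the block-end drain.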
655
656      sv_frame_t got = blocks * m_processingBlockSize;
657
658  #ifdef DEBUG_AUDIO_GENERATOR
659      cout << "mixModel [synth]: frames " << frames
660           << ", blocks " << blocks << endl;
661  #endif
662
663      float **bufferIndexes = new float *[m_targetChannelCount];
664
665      for (int i = 0; i < blocks; ++i) {
666
667          sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
668
669          for (int c = 0; c < m_targetChannelCount; ++c) {
670              bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
671          }
672
673          SparseTimeValueModel::PointList points =
674              stvm->getPoints(reqStart, reqStart + m_processingBlockSize);