Mercurial > hg > svapp
comparison audioio/AudioGenerator.cpp @ 366:0876ea394902 warnfix_no_size_t
Remove size_t's, fix compiler warnings
author | Chris Cannam |
---|---|
date | Tue, 17 Jun 2014 16:23:06 +0100 |
parents | 8d7f39df44ed |
children | 2484e6f95c06 |
comparison
equal
deleted
inserted
replaced
355:e7a3fa8f4eec | 366:0876ea394902 |
---|---|
35 #include <cmath> | 35 #include <cmath> |
36 | 36 |
37 #include <QDir> | 37 #include <QDir> |
38 #include <QFile> | 38 #include <QFile> |
39 | 39 |
40 const size_t | 40 const int |
41 AudioGenerator::m_processingBlockSize = 1024; | 41 AudioGenerator::m_processingBlockSize = 1024; |
42 | 42 |
43 QString | 43 QString |
44 AudioGenerator::m_sampleDir = ""; | 44 AudioGenerator::m_sampleDir = ""; |
45 | 45 |
289 | 289 |
290 m_noteOffs.clear(); | 290 m_noteOffs.clear(); |
291 } | 291 } |
292 | 292 |
293 void | 293 void |
294 AudioGenerator::setTargetChannelCount(size_t targetChannelCount) | 294 AudioGenerator::setTargetChannelCount(int targetChannelCount) |
295 { | 295 { |
296 if (m_targetChannelCount == targetChannelCount) return; | 296 if (m_targetChannelCount == targetChannelCount) return; |
297 | 297 |
298 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl; | 298 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl; |
299 | 299 |
303 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { | 303 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { |
304 if (i->second) i->second->setChannelCount(targetChannelCount); | 304 if (i->second) i->second->setChannelCount(targetChannelCount); |
305 } | 305 } |
306 } | 306 } |
307 | 307 |
308 size_t | 308 int |
309 AudioGenerator::getBlockSize() const | 309 AudioGenerator::getBlockSize() const |
310 { | 310 { |
311 return m_processingBlockSize; | 311 return m_processingBlockSize; |
312 } | 312 } |
313 | 313 |
327 | 327 |
328 m_soloModelSet.clear(); | 328 m_soloModelSet.clear(); |
329 m_soloing = false; | 329 m_soloing = false; |
330 } | 330 } |
331 | 331 |
332 size_t | 332 int |
333 AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount, | 333 AudioGenerator::mixModel(Model *model, int startFrame, int frameCount, |
334 float **buffer, size_t fadeIn, size_t fadeOut) | 334 float **buffer, int fadeIn, int fadeOut) |
335 { | 335 { |
336 if (m_sourceSampleRate == 0) { | 336 if (m_sourceSampleRate == 0) { |
337 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; | 337 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; |
338 return frameCount; | 338 return frameCount; |
339 } | 339 } |
386 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; | 386 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; |
387 | 387 |
388 return frameCount; | 388 return frameCount; |
389 } | 389 } |
390 | 390 |
391 size_t | 391 int |
392 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, | 392 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, |
393 size_t startFrame, size_t frames, | 393 int startFrame, int frames, |
394 float **buffer, float gain, float pan, | 394 float **buffer, float gain, float pan, |
395 size_t fadeIn, size_t fadeOut) | 395 int fadeIn, int fadeOut) |
396 { | 396 { |
397 static float **channelBuffer = 0; | 397 static float **channelBuffer = 0; |
398 static size_t channelBufSiz = 0; | 398 static int channelBufSiz = 0; |
399 static size_t channelBufCount = 0; | 399 static int channelBufCount = 0; |
400 | 400 |
401 size_t totalFrames = frames + fadeIn/2 + fadeOut/2; | 401 int totalFrames = frames + fadeIn/2 + fadeOut/2; |
402 | 402 |
403 size_t modelChannels = dtvm->getChannelCount(); | 403 int modelChannels = dtvm->getChannelCount(); |
404 | 404 |
405 if (channelBufSiz < totalFrames || channelBufCount < modelChannels) { | 405 if (channelBufSiz < totalFrames || channelBufCount < modelChannels) { |
406 | 406 |
407 for (size_t c = 0; c < channelBufCount; ++c) { | 407 for (int c = 0; c < channelBufCount; ++c) { |
408 delete[] channelBuffer[c]; | 408 delete[] channelBuffer[c]; |
409 } | 409 } |
410 | 410 |
411 delete[] channelBuffer; | 411 delete[] channelBuffer; |
412 channelBuffer = new float *[modelChannels]; | 412 channelBuffer = new float *[modelChannels]; |
413 | 413 |
414 for (size_t c = 0; c < modelChannels; ++c) { | 414 for (int c = 0; c < modelChannels; ++c) { |
415 channelBuffer[c] = new float[totalFrames]; | 415 channelBuffer[c] = new float[totalFrames]; |
416 } | 416 } |
417 | 417 |
418 channelBufCount = modelChannels; | 418 channelBufCount = modelChannels; |
419 channelBufSiz = totalFrames; | 419 channelBufSiz = totalFrames; |
420 } | 420 } |
421 | 421 |
422 size_t got = 0; | 422 int got = 0; |
423 | 423 |
424 if (startFrame >= fadeIn/2) { | 424 if (startFrame >= fadeIn/2) { |
425 got = dtvm->getData(0, modelChannels - 1, | 425 got = dtvm->getData(0, modelChannels - 1, |
426 startFrame - fadeIn/2, | 426 startFrame - fadeIn/2, |
427 frames + fadeOut/2 + fadeIn/2, | 427 frames + fadeOut/2 + fadeIn/2, |
428 channelBuffer); | 428 channelBuffer); |
429 } else { | 429 } else { |
430 size_t missing = fadeIn/2 - startFrame; | 430 int missing = fadeIn/2 - startFrame; |
431 | 431 |
432 for (size_t c = 0; c < modelChannels; ++c) { | 432 for (int c = 0; c < modelChannels; ++c) { |
433 channelBuffer[c] += missing; | 433 channelBuffer[c] += missing; |
434 } | 434 } |
435 | 435 |
436 got = dtvm->getData(0, modelChannels - 1, | 436 got = dtvm->getData(0, modelChannels - 1, |
437 startFrame, | 437 startFrame, |
438 frames + fadeOut/2, | 438 frames + fadeOut/2, |
439 channelBuffer); | 439 channelBuffer); |
440 | 440 |
441 for (size_t c = 0; c < modelChannels; ++c) { | 441 for (int c = 0; c < modelChannels; ++c) { |
442 channelBuffer[c] -= missing; | 442 channelBuffer[c] -= missing; |
443 } | 443 } |
444 | 444 |
445 got += missing; | 445 got += missing; |
446 } | 446 } |
447 | 447 |
448 for (size_t c = 0; c < m_targetChannelCount; ++c) { | 448 for (int c = 0; c < m_targetChannelCount; ++c) { |
449 | 449 |
450 size_t sourceChannel = (c % modelChannels); | 450 int sourceChannel = (c % modelChannels); |
451 | 451 |
452 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; | 452 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; |
453 | 453 |
454 float channelGain = gain; | 454 float channelGain = gain; |
455 if (pan != 0.0) { | 455 if (pan != 0.0) { |
458 } else { | 458 } else { |
459 if (pan < 0.0) channelGain *= pan + 1.0; | 459 if (pan < 0.0) channelGain *= pan + 1.0; |
460 } | 460 } |
461 } | 461 } |
462 | 462 |
463 for (size_t i = 0; i < fadeIn/2; ++i) { | 463 for (int i = 0; i < fadeIn/2; ++i) { |
464 float *back = buffer[c]; | 464 float *back = buffer[c]; |
465 back -= fadeIn/2; | 465 back -= fadeIn/2; |
466 back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn; | 466 back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn; |
467 } | 467 } |
468 | 468 |
469 for (size_t i = 0; i < frames + fadeOut/2; ++i) { | 469 for (int i = 0; i < frames + fadeOut/2; ++i) { |
470 float mult = channelGain; | 470 float mult = channelGain; |
471 if (i < fadeIn/2) { | 471 if (i < fadeIn/2) { |
472 mult = (mult * i) / fadeIn; | 472 mult = (mult * i) / fadeIn; |
473 } | 473 } |
474 if (i > frames - fadeOut/2) { | 474 if (i > frames - fadeOut/2) { |
481 } | 481 } |
482 | 482 |
483 return got; | 483 return got; |
484 } | 484 } |
485 | 485 |
486 size_t | 486 int |
487 AudioGenerator::mixClipModel(Model *model, | 487 AudioGenerator::mixClipModel(Model *model, |
488 size_t startFrame, size_t frames, | 488 int startFrame, int frames, |
489 float **buffer, float gain, float pan) | 489 float **buffer, float gain, float pan) |
490 { | 490 { |
491 ClipMixer *clipMixer = m_clipMixerMap[model]; | 491 ClipMixer *clipMixer = m_clipMixerMap[model]; |
492 if (!clipMixer) return 0; | 492 if (!clipMixer) return 0; |
493 | 493 |
494 size_t blocks = frames / m_processingBlockSize; | 494 int blocks = frames / m_processingBlockSize; |
495 | 495 |
496 //!!! todo: the below -- it matters | 496 //!!! todo: the below -- it matters |
497 | 497 |
498 //!!! hang on -- the fact that the audio callback play source's | 498 //!!! hang on -- the fact that the audio callback play source's |
499 //buffer is a multiple of the plugin's buffer size doesn't mean | 499 //buffer is a multiple of the plugin's buffer size doesn't mean |
503 //always have a multiple of the plugin buffer size? I guess this | 503 //always have a multiple of the plugin buffer size? I guess this |
504 //class has to be queryable for the plugin buffer size & the | 504 //class has to be queryable for the plugin buffer size & the |
505 //callback play source has to use that as a multiple for all the | 505 //callback play source has to use that as a multiple for all the |
506 //calls to mixModel | 506 //calls to mixModel |
507 | 507 |
508 size_t got = blocks * m_processingBlockSize; | 508 int got = blocks * m_processingBlockSize; |
509 | 509 |
510 #ifdef DEBUG_AUDIO_GENERATOR | 510 #ifdef DEBUG_AUDIO_GENERATOR |
511 cout << "mixModel [clip]: frames " << frames | 511 cout << "mixModel [clip]: frames " << frames |
512 << ", blocks " << blocks << endl; | 512 << ", blocks " << blocks << endl; |
513 #endif | 513 #endif |
517 | 517 |
 518 NoteOffSet &noteOffs = m_noteOffs[model]; | 518 NoteOffSet &noteOffs = m_noteOffs[model]; |
519 | 519 |
520 float **bufferIndexes = new float *[m_targetChannelCount]; | 520 float **bufferIndexes = new float *[m_targetChannelCount]; |
521 | 521 |
522 for (size_t i = 0; i < blocks; ++i) { | 522 for (int i = 0; i < blocks; ++i) { |
523 | 523 |
524 size_t reqStart = startFrame + i * m_processingBlockSize; | 524 int reqStart = startFrame + i * m_processingBlockSize; |
525 | 525 |
526 NoteList notes; | 526 NoteList notes; |
527 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); | 527 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); |
528 if (exportable) { | 528 if (exportable) { |
529 notes = exportable->getNotes(reqStart, | 529 notes = exportable->getNotesWithin(reqStart, |
530 reqStart + m_processingBlockSize); | 530 reqStart + m_processingBlockSize); |
531 } | 531 } |
532 | 532 |
533 std::vector<ClipMixer::NoteStart> starts; | 533 std::vector<ClipMixer::NoteStart> starts; |
534 std::vector<ClipMixer::NoteEnd> ends; | 534 std::vector<ClipMixer::NoteEnd> ends; |
535 | 535 |
536 for (NoteList::const_iterator ni = notes.begin(); | 536 for (NoteList::const_iterator ni = notes.begin(); |
537 ni != notes.end(); ++ni) { | 537 ni != notes.end(); ++ni) { |
538 | 538 |
539 size_t noteFrame = ni->start; | 539 int noteFrame = ni->start; |
540 | 540 |
541 if (noteFrame < reqStart || | 541 if (noteFrame < reqStart || |
542 noteFrame >= reqStart + m_processingBlockSize) continue; | 542 noteFrame >= reqStart + m_processingBlockSize) continue; |
543 | 543 |
544 while (noteOffs.begin() != noteOffs.end() && | 544 while (noteOffs.begin() != noteOffs.end() && |
545 noteOffs.begin()->frame <= noteFrame) { | 545 noteOffs.begin()->frame <= noteFrame) { |
546 | 546 |
547 size_t eventFrame = noteOffs.begin()->frame; | 547 int eventFrame = noteOffs.begin()->frame; |
548 if (eventFrame < reqStart) eventFrame = reqStart; | 548 if (eventFrame < reqStart) eventFrame = reqStart; |
549 | 549 |
550 off.frameOffset = eventFrame - reqStart; | 550 off.frameOffset = eventFrame - reqStart; |
551 off.frequency = noteOffs.begin()->frequency; | 551 off.frequency = noteOffs.begin()->frequency; |
552 | 552 |
573 } | 573 } |
574 | 574 |
575 while (noteOffs.begin() != noteOffs.end() && | 575 while (noteOffs.begin() != noteOffs.end() && |
576 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { | 576 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { |
577 | 577 |
578 size_t eventFrame = noteOffs.begin()->frame; | 578 int eventFrame = noteOffs.begin()->frame; |
579 if (eventFrame < reqStart) eventFrame = reqStart; | 579 if (eventFrame < reqStart) eventFrame = reqStart; |
580 | 580 |
581 off.frameOffset = eventFrame - reqStart; | 581 off.frameOffset = eventFrame - reqStart; |
582 off.frequency = noteOffs.begin()->frequency; | 582 off.frequency = noteOffs.begin()->frequency; |
583 | 583 |
587 | 587 |
588 ends.push_back(off); | 588 ends.push_back(off); |
589 noteOffs.erase(noteOffs.begin()); | 589 noteOffs.erase(noteOffs.begin()); |
590 } | 590 } |
591 | 591 |
592 for (size_t c = 0; c < m_targetChannelCount; ++c) { | 592 for (int c = 0; c < m_targetChannelCount; ++c) { |
593 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; | 593 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; |
594 } | 594 } |
595 | 595 |
596 clipMixer->mix(bufferIndexes, gain, starts, ends); | 596 clipMixer->mix(bufferIndexes, gain, starts, ends); |
597 } | 597 } |
599 delete[] bufferIndexes; | 599 delete[] bufferIndexes; |
600 | 600 |
601 return got; | 601 return got; |
602 } | 602 } |
603 | 603 |
604 size_t | 604 int |
605 AudioGenerator::mixContinuousSynthModel(Model *model, | 605 AudioGenerator::mixContinuousSynthModel(Model *model, |
606 size_t startFrame, | 606 int startFrame, |
607 size_t frames, | 607 int frames, |
608 float **buffer, | 608 float **buffer, |
609 float gain, | 609 float gain, |
610 float pan) | 610 float pan) |
611 { | 611 { |
612 ContinuousSynth *synth = m_continuousSynthMap[model]; | 612 ContinuousSynth *synth = m_continuousSynthMap[model]; |
614 | 614 |
615 // only type we support here at the moment | 615 // only type we support here at the moment |
616 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); | 616 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); |
617 if (stvm->getScaleUnits() != "Hz") return 0; | 617 if (stvm->getScaleUnits() != "Hz") return 0; |
618 | 618 |
619 size_t blocks = frames / m_processingBlockSize; | 619 int blocks = frames / m_processingBlockSize; |
620 | 620 |
621 //!!! todo: see comment in mixClipModel | 621 //!!! todo: see comment in mixClipModel |
622 | 622 |
623 size_t got = blocks * m_processingBlockSize; | 623 int got = blocks * m_processingBlockSize; |
624 | 624 |
625 #ifdef DEBUG_AUDIO_GENERATOR | 625 #ifdef DEBUG_AUDIO_GENERATOR |
626 cout << "mixModel [synth]: frames " << frames | 626 cout << "mixModel [synth]: frames " << frames |
627 << ", blocks " << blocks << endl; | 627 << ", blocks " << blocks << endl; |
628 #endif | 628 #endif |
629 | 629 |
630 float **bufferIndexes = new float *[m_targetChannelCount]; | 630 float **bufferIndexes = new float *[m_targetChannelCount]; |
631 | 631 |
632 for (size_t i = 0; i < blocks; ++i) { | 632 for (int i = 0; i < blocks; ++i) { |
633 | 633 |
634 size_t reqStart = startFrame + i * m_processingBlockSize; | 634 int reqStart = startFrame + i * m_processingBlockSize; |
635 | 635 |
636 for (size_t c = 0; c < m_targetChannelCount; ++c) { | 636 for (int c = 0; c < m_targetChannelCount; ++c) { |
637 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; | 637 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; |
638 } | 638 } |
639 | 639 |
640 SparseTimeValueModel::PointList points = | 640 SparseTimeValueModel::PointList points = |
641 stvm->getPoints(reqStart, reqStart + m_processingBlockSize); | 641 stvm->getPoints(reqStart, reqStart + m_processingBlockSize); |