annotate audio/AudioGenerator.cpp @ 676:0d4236961c97

Ensure that AggregateModels are actually deleted on release despite not being in model list (ugh, this is ugly)
author Chris Cannam
date Tue, 11 Jun 2019 13:39:17 +0100
parents ed9cb577eb7c
children 161063152ddd
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@43 25 #include "data/model/DenseTimeValueModel.h"
Chris@313 26 #include "data/model/SparseTimeValueModel.h"
Chris@43 27 #include "data/model/SparseOneDimensionalModel.h"
Chris@645 28 #include "base/NoteData.h"
Chris@43 29
Chris@307 30 #include "ClipMixer.h"
Chris@313 31 #include "ContinuousSynth.h"
Chris@307 32
Chris@43 33 #include <iostream>
Chris@167 34 #include <cmath>
Chris@43 35
Chris@43 36 #include <QDir>
Chris@43 37 #include <QFile>
Chris@43 38
Chris@436 39 const sv_frame_t
Chris@315 40 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 41
Chris@43 42 QString
Chris@43 43 AudioGenerator::m_sampleDir = "";
Chris@43 44
Chris@43 45 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 46
Chris@43 47 AudioGenerator::AudioGenerator() :
Chris@43 48 m_sourceSampleRate(0),
Chris@43 49 m_targetChannelCount(1),
Chris@348 50 m_waveType(0),
Chris@382 51 m_soloing(false),
Chris@636 52 m_channelBuffer(nullptr),
Chris@382 53 m_channelBufSiz(0),
Chris@382 54 m_channelBufCount(0)
Chris@43 55 {
Chris@108 56 initialiseSampleDir();
Chris@43 57
Chris@43 58 connect(PlayParameterRepository::getInstance(),
Chris@309 59 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 60 this,
Chris@309 61 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 62 }
Chris@43 63
Chris@43 64 AudioGenerator::~AudioGenerator()
Chris@43 65 {
Chris@177 66 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 67 cerr << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 68 #endif
Chris@593 69
Chris@593 70 for (int i = 0; i < m_channelBufCount; ++i) {
Chris@593 71 delete[] m_channelBuffer[i];
Chris@593 72 }
Chris@593 73 delete[] m_channelBuffer;
Chris@43 74 }
Chris@43 75
Chris@108 76 void
Chris@108 77 AudioGenerator::initialiseSampleDir()
Chris@43 78 {
Chris@108 79 if (m_sampleDir != "") return;
Chris@108 80
Chris@108 81 try {
Chris@108 82 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@598 83 } catch (const DirectoryCreationFailed &f) {
Chris@293 84 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 85 << " Failed to create temporary sample directory"
Chris@293 86 << endl;
Chris@108 87 m_sampleDir = "";
Chris@108 88 return;
Chris@108 89 }
Chris@108 90
Chris@108 91 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 92
Chris@108 93 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 94
Chris@108 95 QString fileName(sampleResourceDir[i]);
Chris@108 96 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 97 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 98
Chris@151 99 if (!file.copy(target)) {
Chris@293 100 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 101 << "Unable to copy " << fileName
Chris@108 102 << " into temporary directory \""
Chris@293 103 << m_sampleDir << "\"" << endl;
Chris@151 104 } else {
Chris@151 105 QFile tf(target);
Chris@151 106 tf.setPermissions(tf.permissions() |
Chris@151 107 QFile::WriteOwner |
Chris@151 108 QFile::WriteUser);
Chris@108 109 }
Chris@43 110 }
Chris@43 111 }
Chris@43 112
Chris@43 113 bool
Chris@43 114 AudioGenerator::addModel(Model *model)
Chris@43 115 {
Chris@43 116 if (m_sourceSampleRate == 0) {
Chris@43 117
Chris@595 118 m_sourceSampleRate = model->getSampleRate();
Chris@43 119
Chris@43 120 } else {
Chris@43 121
Chris@595 122 DenseTimeValueModel *dtvm =
Chris@595 123 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 124
Chris@595 125 if (dtvm) {
Chris@595 126 m_sourceSampleRate = model->getSampleRate();
Chris@595 127 return true;
Chris@595 128 }
Chris@43 129 }
Chris@307 130
Chris@418 131 const Playable *playable = model;
Chris@418 132 if (!playable || !playable->canPlay()) return false;
Chris@418 133
Chris@418 134 PlayParameters *parameters =
Chris@595 135 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@418 136
Chris@418 137 bool willPlay = parameters && !parameters->isPlayMuted();
Chris@418 138
Chris@313 139 if (usesClipMixer(model)) {
Chris@313 140 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 141 if (mixer) {
Chris@313 142 QMutexLocker locker(&m_mutex);
Chris@616 143 m_clipMixerMap[model->getId()] = mixer;
Chris@418 144 return willPlay;
Chris@313 145 }
Chris@313 146 }
Chris@313 147
Chris@313 148 if (usesContinuousSynth(model)) {
Chris@313 149 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 150 if (synth) {
Chris@313 151 QMutexLocker locker(&m_mutex);
Chris@616 152 m_continuousSynthMap[model->getId()] = synth;
Chris@418 153 return willPlay;
Chris@313 154 }
Chris@43 155 }
Chris@307 156
Chris@43 157 return false;
Chris@43 158 }
Chris@43 159
Chris@43 160 void
Chris@309 161 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 162 {
Chris@108 163 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 164 if (!model) {
Chris@309 165 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 166 << playable << " is not a supported model type"
Chris@293 167 << endl;
Chris@108 168 return;
Chris@108 169 }
Chris@108 170
Chris@616 171 if (m_clipMixerMap.find(model->getId()) == m_clipMixerMap.end()) {
Chris@616 172 return;
Chris@616 173 }
Chris@307 174
Chris@307 175 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 176 if (mixer) {
Chris@43 177 QMutexLocker locker(&m_mutex);
Chris@616 178 m_clipMixerMap[model->getId()] = mixer;
Chris@43 179 }
Chris@43 180 }
Chris@308 181
Chris@313 182 bool
Chris@313 183 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 184 {
Chris@313 185 bool clip =
Chris@313 186 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@646 187 qobject_cast<const NoteModel *>(model));
Chris@313 188 return clip;
Chris@43 189 }
Chris@43 190
Chris@313 191 bool
Chris@349 192 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 193 {
Chris@349 194 // basically, anything that usually has sustain, or that often has
Chris@349 195 // multiple sounds playing at once (note models do both), wants to
Chris@349 196 // use a quieter level than a simple click track does
Chris@646 197 bool does = (qobject_cast<const NoteModel *>(model));
Chris@349 198 return does;
Chris@349 199 }
Chris@349 200
Chris@349 201 bool
Chris@313 202 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 203 {
Chris@313 204 bool cont =
Chris@313 205 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 206 return cont;
Chris@313 207 }
Chris@313 208
Chris@307 209 ClipMixer *
Chris@307 210 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 211 {
Chris@309 212 QString clipId;
Chris@43 213
Chris@108 214 const Playable *playable = model;
Chris@636 215 if (!playable || !playable->canPlay()) return nullptr;
Chris@108 216
Chris@43 217 PlayParameters *parameters =
Chris@595 218 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 219 if (parameters) {
Chris@309 220 clipId = parameters->getPlayClipId();
Chris@43 221 }
Chris@43 222
Chris@445 223 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 224 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@445 225 #endif
Chris@276 226
Chris@309 227 if (clipId == "") {
Chris@308 228 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@636 229 return nullptr;
Chris@276 230 }
Chris@43 231
Chris@308 232 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 233 m_sourceSampleRate,
Chris@308 234 m_processingBlockSize);
Chris@307 235
Chris@436 236 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
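// [Editorial note: clipF0 here is middle C at concert pitch, about
// 261.63 Hz. Presumably ClipMixer uses it as the reference pitch of the
// clip file, resampling by the ratio of the requested note frequency to
// clipF0, so that e.g. an A4 note at 440 Hz would play the clip at
// roughly 440 / 261.63 = 1.68 times its recorded rate.]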
Chris@307 237
Chris@309 238 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 239
Chris@436 240 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
Chris@349 241 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 242 delete mixer;
Chris@636 243 return nullptr;
Chris@43 244 }
Chris@43 245
Chris@445 246 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 247 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 248 #endif
Chris@43 249
Chris@308 250 return mixer;
Chris@308 251 }
Chris@43 252
Chris@313 253 ContinuousSynth *
Chris@313 254 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 255 {
Chris@313 256 const Playable *playable = model;
Chris@636 257 if (!playable || !playable->canPlay()) return nullptr;
Chris@313 258
Chris@313 259 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 260 m_sourceSampleRate,
rmb456@323 261 m_processingBlockSize,
rmb456@323 262 m_waveType);
Chris@313 263
Chris@445 264 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 265 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 266 #endif
Chris@313 267
Chris@313 268 return synth;
Chris@313 269 }
Chris@313 270
Chris@43 271 void
Chris@43 272 AudioGenerator::removeModel(Model *model)
Chris@43 273 {
Chris@43 274 SparseOneDimensionalModel *sodm =
Chris@595 275 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 276 if (!sodm) return; // nothing to do
Chris@43 277
Chris@43 278 QMutexLocker locker(&m_mutex);
Chris@43 279
Chris@616 280 if (m_clipMixerMap.find(sodm->getId()) == m_clipMixerMap.end()) {
Chris@616 281 return;
Chris@616 282 }
Chris@43 283
Chris@616 284 ClipMixer *mixer = m_clipMixerMap[sodm->getId()];
Chris@616 285 m_clipMixerMap.erase(sodm->getId());
Chris@308 286 delete mixer;
Chris@43 287 }
Chris@43 288
Chris@43 289 void
Chris@43 290 AudioGenerator::clearModels()
Chris@43 291 {
Chris@43 292 QMutexLocker locker(&m_mutex);
Chris@308 293
Chris@308 294 while (!m_clipMixerMap.empty()) {
Chris@308 295 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@595 296 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@595 297 delete mixer;
Chris@43 298 }
Chris@43 299 }
Chris@43 300
Chris@43 301 void
Chris@43 302 AudioGenerator::reset()
Chris@43 303 {
Chris@43 304 QMutexLocker locker(&m_mutex);
Chris@308 305
Chris@445 306 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 307 cerr << "AudioGenerator::reset()" << endl;
Chris@445 308 #endif
Chris@397 309
Chris@616 310 for (ClipMixerMap::iterator i = m_clipMixerMap.begin();
Chris@616 311 i != m_clipMixerMap.end(); ++i) {
Chris@595 312 if (i->second) {
Chris@595 313 i->second->reset();
Chris@595 314 }
Chris@43 315 }
Chris@43 316
Chris@43 317 m_noteOffs.clear();
Chris@43 318 }
Chris@43 319
Chris@43 320 void
Chris@366 321 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 322 {
Chris@43 323 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 324
Chris@233 325 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 326
Chris@43 327 QMutexLocker locker(&m_mutex);
Chris@43 328 m_targetChannelCount = targetChannelCount;
Chris@43 329
Chris@308 330 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 331 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 332 }
Chris@43 333 }
Chris@43 334
Chris@436 335 sv_frame_t
Chris@43 336 AudioGenerator::getBlockSize() const
Chris@43 337 {
Chris@305 338 return m_processingBlockSize;
Chris@43 339 }
Chris@43 340
Chris@43 341 void
Chris@43 342 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 343 {
Chris@43 344 QMutexLocker locker(&m_mutex);
Chris@43 345
Chris@43 346 m_soloModelSet = s;
Chris@43 347 m_soloing = true;
Chris@43 348 }
Chris@43 349
Chris@43 350 void
Chris@43 351 AudioGenerator::clearSoloModelSet()
Chris@43 352 {
Chris@43 353 QMutexLocker locker(&m_mutex);
Chris@43 354
Chris@43 355 m_soloModelSet.clear();
Chris@43 356 m_soloing = false;
Chris@43 357 }
Chris@43 358
Chris@436 359 sv_frame_t
Chris@613 360 AudioGenerator::mixModel(Model *model,
Chris@613 361 sv_frame_t startFrame, sv_frame_t frameCount,
Chris@613 362 float **buffer,
Chris@613 363 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 364 {
Chris@43 365 if (m_sourceSampleRate == 0) {
Chris@595 366 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@595 367 return frameCount;
Chris@43 368 }
Chris@43 369
Chris@43 370 QMutexLocker locker(&m_mutex);
Chris@43 371
Chris@108 372 Playable *playable = model;
Chris@108 373 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 374
Chris@43 375 PlayParameters *parameters =
Chris@595 376 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 377 if (!parameters) return frameCount;
Chris@43 378
Chris@43 379 bool playing = !parameters->isPlayMuted();
Chris@43 380 if (!playing) {
Chris@43 381 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 382 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 383 #endif
Chris@43 384 return frameCount;
Chris@43 385 }
Chris@43 386
Chris@43 387 if (m_soloing) {
Chris@43 388 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 389 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 390 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 391 #endif
Chris@43 392 return frameCount;
Chris@43 393 }
Chris@43 394 }
Chris@43 395
Chris@43 396 float gain = parameters->getPlayGain();
Chris@43 397 float pan = parameters->getPlayPan();
Chris@43 398
Chris@43 399 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 400 if (dtvm) {
Chris@595 401 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@595 402 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 403 }
Chris@43 404
Chris@313 405 if (usesClipMixer(model)) {
Chris@313 406 return mixClipModel(model, startFrame, frameCount,
Chris@313 407 buffer, gain, pan);
Chris@313 408 }
Chris@43 409
Chris@313 410 if (usesContinuousSynth(model)) {
Chris@313 411 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 412 buffer, gain, pan);
Chris@43 413 }
Chris@43 414
Chris@276 415 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 416
Chris@43 417 return frameCount;
Chris@43 418 }
Chris@43 419
Chris@436 420 sv_frame_t
Chris@43 421 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@595 422 sv_frame_t startFrame, sv_frame_t frames,
Chris@595 423 float **buffer, float gain, float pan,
Chris@595 424 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 425 {
Chris@436 426 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
Chris@43 427
Chris@366 428 int modelChannels = dtvm->getChannelCount();
Chris@80 429
Chris@382 430 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
Chris@80 431
Chris@382 432 for (int c = 0; c < m_channelBufCount; ++c) {
Chris@382 433 delete[] m_channelBuffer[c];
Chris@80 434 }
Chris@80 435
Chris@595 436 delete[] m_channelBuffer;
Chris@382 437 m_channelBuffer = new float *[modelChannels];
Chris@80 438
Chris@366 439 for (int c = 0; c < modelChannels; ++c) {
Chris@382 440 m_channelBuffer[c] = new float[maxFrames];
Chris@80 441 }
Chris@80 442
Chris@382 443 m_channelBufCount = modelChannels;
Chris@595 444 m_channelBufSiz = maxFrames;
Chris@43 445 }
Chris@80 446
Chris@436 447 sv_frame_t got = 0;
Chris@80 448
Chris@80 449 if (startFrame >= fadeIn/2) {
Chris@460 450
Chris@460 451 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 452 startFrame - fadeIn/2,
Chris@460 453 frames + fadeOut/2 + fadeIn/2);
Chris@460 454
Chris@460 455 for (int c = 0; c < modelChannels; ++c) {
Chris@460 456 copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
Chris@460 457 }
Chris@460 458
Chris@461 459 got = data[0].size();
Chris@460 460
Chris@80 461 } else {
Chris@436 462 sv_frame_t missing = fadeIn/2 - startFrame;
Chris@80 463
Chris@382 464 if (missing > 0) {
Chris@382 465 cerr << "note: channelBufSiz = " << m_channelBufSiz
Chris@382 466 << ", frames + fadeOut/2 = " << frames + fadeOut/2
Chris@382 467 << ", startFrame = " << startFrame
Chris@382 468 << ", missing = " << missing << endl;
Chris@80 469 }
Chris@80 470
Chris@460 471 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 472 startFrame,
Chris@460 473 frames + fadeOut/2);
Chris@366 474 for (int c = 0; c < modelChannels; ++c) {
Chris@460 475 copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
Chris@80 476 }
Chris@80 477
Chris@461 478 got = data[0].size() + missing;
Chris@595 479 }
Chris@43 480
Chris@366 481 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@43 482
Chris@595 483 int sourceChannel = (c % modelChannels);
Chris@43 484
Chris@595 485 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 486
Chris@595 487 float channelGain = gain;
Chris@595 488 if (pan != 0.0) {
Chris@595 489 if (c == 0) {
Chris@595 490 if (pan > 0.0) channelGain *= 1.0f - pan;
Chris@595 491 } else {
Chris@595 492 if (pan < 0.0) channelGain *= pan + 1.0f;
Chris@595 493 }
Chris@595 494 }
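// [Editorial note: the block above is a simple linear pan law. For
// example, pan = +0.5 leaves the second channel at the full channelGain
// while scaling channel 0 by 0.5, and pan = -1.0 silences the second
// channel entirely.]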
Chris@43 495
Chris@595 496 for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
Chris@595 497 float *back = buffer[c];
Chris@595 498 back -= fadeIn/2;
Chris@595 499 back[i] +=
Chris@436 500 (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
Chris@436 501 / float(fadeIn);
Chris@595 502 }
Chris@43 503
Chris@595 504 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@595 505 float mult = channelGain;
Chris@595 506 if (i < fadeIn/2) {
Chris@595 507 mult = (mult * float(i)) / float(fadeIn);
Chris@595 508 }
Chris@595 509 if (i > frames - fadeOut/2) {
Chris@595 510 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
Chris@595 511 }
Chris@382 512 float val = m_channelBuffer[sourceChannel][i];
Chris@80 513 if (i >= got) val = 0.f;
Chris@595 514 buffer[c][i] += mult * val;
Chris@595 515 }
Chris@43 516 }
Chris@43 517
Chris@43 518 return got;
Chris@43 519 }
Chris@43 520
Chris@436 521 sv_frame_t
Chris@313 522 AudioGenerator::mixClipModel(Model *model,
Chris@436 523 sv_frame_t startFrame, sv_frame_t frames,
Chris@313 524 float **buffer, float gain, float pan)
Chris@43 525 {
Chris@616 526 ClipMixer *clipMixer = m_clipMixerMap[model->getId()];
Chris@308 527 if (!clipMixer) return 0;
Chris@43 528
Chris@436 529 int blocks = int(frames / m_processingBlockSize);
Chris@43 530
Chris@313 531 //!!! todo: the below -- it matters
Chris@313 532
Chris@43 533 //!!! hang on -- the fact that the audio callback play source's
Chris@43 534 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 535 //that we always get called for a multiple of it here (because it
Chris@43 536 //also depends on the JACK block size). how should we ensure that
Chris@43 537 //all models write the same amount in to the mix, and that we
Chris@43 538 //always have a multiple of the plugin buffer size? I guess this
Chris@43 539 //class has to be queryable for the plugin buffer size & the
Chris@43 540 //callback play source has to use that as a multiple for all the
Chris@43 541 //calls to mixModel
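// [Editorial sketch, not part of this revision: one way for a caller to
// satisfy the constraint described above, assuming it can buffer any
// remainder itself, would be to round each request down to a whole
// number of processing blocks before calling mixModel:
//
//     sv_frame_t block = generator->getBlockSize();
//     sv_frame_t usable = (frameCount / block) * block;
//     generator->mixModel(model, startFrame, usable, buffers, 0, 0);
//
// where "generator" and "buffers" are hypothetical caller-side names.]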
Chris@43 542
Chris@436 543 sv_frame_t got = blocks * m_processingBlockSize;
Chris@43 544
Chris@43 545 #ifdef DEBUG_AUDIO_GENERATOR
Chris@442 546 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
Chris@442 547 << ", blocks " << blocks << ", have " << m_noteOffs.size()
Chris@442 548 << " note-offs" << endl;
Chris@43 549 #endif
Chris@43 550
Chris@308 551 ClipMixer::NoteStart on;
Chris@308 552 ClipMixer::NoteEnd off;
Chris@43 553
Chris@616 554 NoteOffSet &noteOffs = m_noteOffs[model->getId()];
Chris@43 555
Chris@308 556 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 557
Chris@646 558 //!!! + for first block, prime with notes already active
Chris@646 559
Chris@366 560 for (int i = 0; i < blocks; ++i) {
Chris@43 561
Chris@595 562 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 563
Chris@299 564 NoteList notes;
Chris@299 565 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 566 if (exportable) {
Chris@646 567 notes = exportable->getNotesStartingWithin(reqStart,
Chris@646 568 m_processingBlockSize);
Chris@299 569 }
Chris@43 570
Chris@308 571 std::vector<ClipMixer::NoteStart> starts;
Chris@308 572 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 573
Chris@615 574 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 575 noteOffs.begin()->onFrame > reqStart) {
Chris@615 576
Chris@615 577 // We must have jumped back in time, as there is a
Chris@615 578 // note-off pending for a note that hasn't begun yet. Emit
Chris@615 579 // the note-off now and discard it
Chris@615 580
Chris@615 581 off.frameOffset = 0;
Chris@615 582 off.frequency = noteOffs.begin()->frequency;
Chris@615 583
Chris@615 584 #ifdef DEBUG_AUDIO_GENERATOR
Chris@615 585 cerr << "mixModel [clip]: adding rewind-caused note-off at frame offset 0 frequency " << off.frequency << endl;
Chris@615 586 #endif
Chris@615 587
Chris@615 588 ends.push_back(off);
Chris@615 589 noteOffs.erase(noteOffs.begin());
Chris@615 590 }
Chris@615 591
Chris@595 592 for (NoteList::const_iterator ni = notes.begin();
Chris@275 593 ni != notes.end(); ++ni) {
Chris@43 594
Chris@595 595 sv_frame_t noteFrame = ni->start;
Chris@596 596 sv_frame_t noteDuration = ni->duration;
Chris@43 597
Chris@595 598 if (noteFrame < reqStart ||
Chris@596 599 noteFrame >= reqStart + m_processingBlockSize) {
Chris@596 600 continue;
Chris@596 601 }
Chris@596 602
Chris@596 603 if (noteDuration == 0) {
Chris@596 604 // If we have a note-off and a note-on with the same
Chris@596 605 // time, then the note-off will be assumed (in the
Chris@596 606 // logic below that deals with two-point note-on/off
Chris@596 607 // events) to be switching off an earlier note before
Chris@596 608 // this one begins -- that's necessary in order to
Chris@596 609 // support adjoining notes of equal pitch. But it does
Chris@596 610 // mean we have to explicitly ignore zero-duration
Chris@596 611 // notes, otherwise they'll be played without end
Chris@596 612 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 613 cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
Chris@596 614 #endif
Chris@596 615 continue;
Chris@596 616 }
Chris@43 617
Chris@595 618 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 619 noteOffs.begin()->offFrame <= noteFrame) {
Chris@43 620
Chris@615 621 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
Chris@308 622 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 623
Chris@308 624 off.frameOffset = eventFrame - reqStart;
Chris@308 625 off.frequency = noteOffs.begin()->frequency;
Chris@43 626
Chris@43 627 #ifdef DEBUG_AUDIO_GENERATOR
Chris@595 628 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 629 #endif
Chris@43 630
Chris@308 631 ends.push_back(off);
Chris@595 632 noteOffs.erase(noteOffs.begin());
Chris@595 633 }
Chris@43 634
Chris@308 635 on.frameOffset = noteFrame - reqStart;
Chris@308 636 on.frequency = ni->getFrequency();
Chris@436 637 on.level = float(ni->velocity) / 127.0f;
Chris@308 638 on.pan = pan;
Chris@43 639
Chris@43 640 #ifdef DEBUG_AUDIO_GENERATOR
Chris@595 641 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
Chris@43 642 #endif
Chris@595 643
Chris@308 644 starts.push_back(on);
Chris@595 645 noteOffs.insert
Chris@615 646 (NoteOff(on.frequency, noteFrame + noteDuration, noteFrame));
Chris@595 647 }
Chris@43 648
Chris@595 649 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 650 noteOffs.begin()->offFrame <=
Chris@615 651 reqStart + m_processingBlockSize) {
Chris@43 652
Chris@615 653 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
Chris@308 654 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 655
Chris@308 656 off.frameOffset = eventFrame - reqStart;
Chris@308 657 off.frequency = noteOffs.begin()->frequency;
Chris@43 658
Chris@43 659 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 660 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 661 #endif
Chris@43 662
Chris@308 663 ends.push_back(off);
Chris@308 664 noteOffs.erase(noteOffs.begin());
Chris@595 665 }
Chris@43 666
Chris@595 667 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@308 668 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 669 }
Chris@43 670
Chris@308 671 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 672 }
Chris@43 673
Chris@308 674 delete[] bufferIndexes;
Chris@43 675
Chris@43 676 return got;
Chris@43 677 }
Chris@313 678
Chris@436 679 sv_frame_t
Chris@313 680 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@436 681 sv_frame_t startFrame,
Chris@436 682 sv_frame_t frames,
Chris@313 683 float **buffer,
Chris@313 684 float gain,
Chris@313 685 float pan)
Chris@313 686 {
Chris@616 687 ContinuousSynth *synth = m_continuousSynthMap[model->getId()];
Chris@313 688 if (!synth) return 0;
Chris@313 689
Chris@313 690 // only type we support here at the moment
Chris@313 691 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 692 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 693
Chris@436 694 int blocks = int(frames / m_processingBlockSize);
Chris@313 695
Chris@313 696 //!!! todo: see comment in mixClipModel
Chris@313 697
Chris@436 698 sv_frame_t got = blocks * m_processingBlockSize;
Chris@313 699
Chris@313 700 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 701 cout << "mixModel [synth]: frames " << frames
Chris@595 702 << ", blocks " << blocks << endl;
Chris@313 703 #endif
Chris@313 704
Chris@313 705 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 706
Chris@366 707 for (int i = 0; i < blocks; ++i) {
Chris@313 708
Chris@595 709 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 710
Chris@595 711 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 712 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 713 }
Chris@313 714
Chris@649 715 EventVector points =
Chris@649 716 stvm->getEventsStartingWithin(reqStart, m_processingBlockSize);
Chris@313 717
Chris@313 718 // by default, repeat last frequency
Chris@313 719 float f0 = 0.f;
Chris@313 720
Chris@649 721 // go straight to the last freq in this range
Chris@649 722 if (!points.empty()) {
Chris@649 723 f0 = points.rbegin()->getValue();
Chris@313 724 }
Chris@313 725
Chris@649 726 // if there is no such frequency and the next point is further
Chris@314 727 // away than twice the model resolution, go silent (same
Chris@314 728 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 729 // segment)
Chris@314 730 if (f0 == 0.f) {
Chris@650 731 Event nextP;
Chris@650 732 if (!stvm->getNearestEventMatching(reqStart + m_processingBlockSize,
Chris@650 733 [](Event) { return true; },
Chris@650 734 EventSeries::Forward,
Chris@650 735 nextP) ||
Chris@650 736 nextP.getFrame() > reqStart + 2 * stvm->getResolution()) {
Chris@314 737 f0 = -1.f;
Chris@314 738 }
Chris@314 739 }
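// [Editorial note: f0 == -1.f is presumably interpreted by ContinuousSynth
// as "go silent", while f0 == 0.f means "hold the previous frequency";
// silence is chosen here only when the next point beyond this block is
// more than twice the model's resolution after reqStart, or when there
// is no next point at all.]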
Chris@650 740
Chris@315 741 // cerr << "f0 = " << f0 << endl;
Chris@313 742
Chris@313 743 synth->mix(bufferIndexes,
Chris@313 744 gain,
Chris@313 745 pan,
Chris@313 746 f0);
Chris@313 747 }
Chris@313 748
Chris@313 749 delete[] bufferIndexes;
Chris@313 750
Chris@313 751 return got;
Chris@313 752 }
Chris@313 753