annotate audio/AudioGenerator.cpp @ 588:d122d3595a32

Store aggregate models in the document and release them when they are invalidated (because their components have been released). They're no longer leaked, but we still don't save them in the session file.
author Chris Cannam
date Mon, 27 Feb 2017 16:26:37 +0000
parents 4480b031fe38
children 821aba42c1bb
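
The commit message above describes keeping aggregate models owned by the document and releasing each one as soon as any of its component models is released, instead of leaking them. As a rough illustration of that lifecycle only, here is a minimal, self-contained C++ sketch; the names Document, AggregateModel, addAggregateModel and releaseModel are placeholders for illustration and do not reproduce the real Sonic Visualiser classes, and (as the message notes) nothing here touches session-file persistence.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Model {
    explicit Model(std::string n) : name(std::move(n)) {}
    std::string name;
};

// An aggregate is only meaningful while all of its component models exist.
struct AggregateModel {
    std::vector<const Model *> components;
};

class Document {
public:
    // Take ownership of an aggregate instead of leaking it.
    void addAggregateModel(std::unique_ptr<AggregateModel> aggregate) {
        m_aggregates.push_back(std::move(aggregate));
    }

    // Called when a component model is released: drop (and thereby delete)
    // every aggregate that referenced it, since it is now invalid.
    void releaseModel(const Model *component) {
        m_aggregates.erase(
            std::remove_if(
                m_aggregates.begin(), m_aggregates.end(),
                [component](const std::unique_ptr<AggregateModel> &a) {
                    return std::find(a->components.begin(),
                                     a->components.end(),
                                     component) != a->components.end();
                }),
            m_aggregates.end());
    }

    std::size_t aggregateCount() const { return m_aggregates.size(); }

private:
    std::vector<std::unique_ptr<AggregateModel>> m_aggregates;
};

int main()
{
    Model left("left"), right("right");

    Document doc;
    std::unique_ptr<AggregateModel> agg(new AggregateModel);
    agg->components = { &left, &right };
    doc.addAggregateModel(std::move(agg));

    std::cout << doc.aggregateCount() << std::endl; // 1
    doc.releaseModel(&left);                        // aggregate invalidated
    std::cout << doc.aggregateCount() << std::endl; // 0
    return 0;
}
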
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
Chris@436 40 const sv_frame_t
Chris@315 41 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
Chris@348 51 m_waveType(0),
Chris@382 52 m_soloing(false),
Chris@382 53 m_channelBuffer(0),
Chris@382 54 m_channelBufSiz(0),
Chris@382 55 m_channelBufCount(0)
Chris@43 56 {
Chris@108 57 initialiseSampleDir();
Chris@43 58
Chris@43 59 connect(PlayParameterRepository::getInstance(),
Chris@309 60 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 61 this,
Chris@309 62 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 63 }
Chris@43 64
Chris@43 65 AudioGenerator::~AudioGenerator()
Chris@43 66 {
Chris@177 67 #ifdef DEBUG_AUDIO_GENERATOR
Chris@233 68 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 69 #endif
Chris@43 70 }
Chris@43 71
Chris@108 72 void
Chris@108 73 AudioGenerator::initialiseSampleDir()
Chris@43 74 {
Chris@108 75 if (m_sampleDir != "") return;
Chris@108 76
Chris@108 77 try {
Chris@108 78 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 79 } catch (DirectoryCreationFailed f) {
Chris@293 80 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 81 << " Failed to create temporary sample directory"
Chris@293 82 << endl;
Chris@108 83 m_sampleDir = "";
Chris@108 84 return;
Chris@108 85 }
Chris@108 86
Chris@108 87 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 88
Chris@108 89 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 90
Chris@108 91 QString fileName(sampleResourceDir[i]);
Chris@108 92 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 93 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 94
Chris@151 95 if (!file.copy(target)) {
Chris@293 96 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 97 << "Unable to copy " << fileName
Chris@108 98 << " into temporary directory \""
Chris@293 99 << m_sampleDir << "\"" << endl;
Chris@151 100 } else {
Chris@151 101 QFile tf(target);
Chris@151 102 tf.setPermissions(tf.permissions() |
Chris@151 103 QFile::WriteOwner |
Chris@151 104 QFile::WriteUser);
Chris@108 105 }
Chris@43 106 }
Chris@43 107 }
Chris@43 108
Chris@43 109 bool
Chris@43 110 AudioGenerator::addModel(Model *model)
Chris@43 111 {
Chris@43 112 if (m_sourceSampleRate == 0) {
Chris@43 113
Chris@43 114 m_sourceSampleRate = model->getSampleRate();
Chris@43 115
Chris@43 116 } else {
Chris@43 117
Chris@43 118 DenseTimeValueModel *dtvm =
Chris@43 119 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 120
Chris@43 121 if (dtvm) {
Chris@43 122 m_sourceSampleRate = model->getSampleRate();
Chris@43 123 return true;
Chris@43 124 }
Chris@43 125 }
Chris@307 126
Chris@418 127 const Playable *playable = model;
Chris@418 128 if (!playable || !playable->canPlay()) return false;
Chris@418 129
Chris@418 130 PlayParameters *parameters =
Chris@418 131 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@418 132
Chris@418 133 bool willPlay = !parameters->isPlayMuted();
Chris@418 134
Chris@313 135 if (usesClipMixer(model)) {
Chris@313 136 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 137 if (mixer) {
Chris@313 138 QMutexLocker locker(&m_mutex);
Chris@313 139 m_clipMixerMap[model] = mixer;
Chris@418 140 return willPlay;
Chris@313 141 }
Chris@313 142 }
Chris@313 143
Chris@313 144 if (usesContinuousSynth(model)) {
Chris@313 145 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 146 if (synth) {
Chris@313 147 QMutexLocker locker(&m_mutex);
Chris@313 148 m_continuousSynthMap[model] = synth;
Chris@418 149 return willPlay;
Chris@313 150 }
Chris@43 151 }
Chris@307 152
Chris@43 153 return false;
Chris@43 154 }
Chris@43 155
Chris@43 156 void
Chris@309 157 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 158 {
Chris@108 159 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 160 if (!model) {
Chris@309 161 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 162 << playable << " is not a supported model type"
Chris@293 163 << endl;
Chris@108 164 return;
Chris@108 165 }
Chris@108 166
Chris@307 167 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 168
Chris@307 169 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 170 if (mixer) {
Chris@43 171 QMutexLocker locker(&m_mutex);
Chris@307 172 m_clipMixerMap[model] = mixer;
Chris@43 173 }
Chris@43 174 }
Chris@308 175
Chris@313 176 bool
Chris@313 177 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 178 {
Chris@313 179 bool clip =
Chris@313 180 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 181 qobject_cast<const NoteModel *>(model) ||
Chris@313 182 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 183 return clip;
Chris@43 184 }
Chris@43 185
Chris@313 186 bool
Chris@349 187 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 188 {
Chris@349 189 // basically, anything that usually has sustain (like notes) or
Chris@349 190 // often has multiple sounds at once (like notes) wants to use a
Chris@349 191 // quieter level than simple click tracks
Chris@349 192 bool does =
Chris@349 193 (qobject_cast<const NoteModel *>(model) ||
Chris@349 194 qobject_cast<const FlexiNoteModel *>(model));
Chris@349 195 return does;
Chris@349 196 }
Chris@349 197
Chris@349 198 bool
Chris@313 199 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 200 {
Chris@313 201 bool cont =
Chris@313 202 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 203 return cont;
Chris@313 204 }
Chris@313 205
Chris@307 206 ClipMixer *
Chris@307 207 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 208 {
Chris@309 209 QString clipId;
Chris@43 210
Chris@108 211 const Playable *playable = model;
Chris@108 212 if (!playable || !playable->canPlay()) return 0;
Chris@108 213
Chris@43 214 PlayParameters *parameters =
Chris@108 215 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 216 if (parameters) {
Chris@309 217 clipId = parameters->getPlayClipId();
Chris@43 218 }
Chris@43 219
Chris@445 220 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 221 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@445 222 #endif
Chris@276 223
Chris@309 224 if (clipId == "") {
Chris@308 225 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 226 return 0;
Chris@276 227 }
Chris@43 228
Chris@308 229 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 230 m_sourceSampleRate,
Chris@308 231 m_processingBlockSize);
Chris@307 232
Chris@436 233 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // MIDI pitch 60 (middle C, ~261.63 Hz at A=440), used as the clip's reference frequency
Chris@307 234
Chris@309 235 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 236
Chris@436 237 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
Chris@349 238 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 239 delete mixer;
Chris@43 240 return 0;
Chris@43 241 }
Chris@43 242
Chris@445 243 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 244 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 245 #endif
Chris@43 246
Chris@308 247 return mixer;
Chris@308 248 }
Chris@43 249
Chris@313 250 ContinuousSynth *
Chris@313 251 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 252 {
Chris@313 253 const Playable *playable = model;
Chris@313 254 if (!playable || !playable->canPlay()) return 0;
Chris@313 255
Chris@313 256 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 257 m_sourceSampleRate,
rmb456@323 258 m_processingBlockSize,
rmb456@323 259 m_waveType);
Chris@313 260
Chris@445 261 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 262 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 263 #endif
Chris@313 264
Chris@313 265 return synth;
Chris@313 266 }
Chris@313 267
Chris@43 268 void
Chris@43 269 AudioGenerator::removeModel(Model *model)
Chris@43 270 {
Chris@43 271 SparseOneDimensionalModel *sodm =
Chris@43 272 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 273 if (!sodm) return; // nothing to do
Chris@43 274
Chris@43 275 QMutexLocker locker(&m_mutex);
Chris@43 276
Chris@308 277 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
Chris@43 278
Chris@308 279 ClipMixer *mixer = m_clipMixerMap[sodm];
Chris@308 280 m_clipMixerMap.erase(sodm);
Chris@308 281 delete mixer;
Chris@43 282 }
Chris@43 283
Chris@43 284 void
Chris@43 285 AudioGenerator::clearModels()
Chris@43 286 {
Chris@43 287 QMutexLocker locker(&m_mutex);
Chris@308 288
Chris@308 289 while (!m_clipMixerMap.empty()) {
Chris@308 290 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@308 291 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@308 292 delete mixer;
Chris@43 293 }
Chris@43 294 }
Chris@43 295
Chris@43 296 void
Chris@43 297 AudioGenerator::reset()
Chris@43 298 {
Chris@43 299 QMutexLocker locker(&m_mutex);
Chris@308 300
Chris@445 301 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 302 cerr << "AudioGenerator::reset()" << endl;
Chris@445 303 #endif
Chris@397 304
Chris@308 305 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@43 306 if (i->second) {
Chris@308 307 i->second->reset();
Chris@43 308 }
Chris@43 309 }
Chris@43 310
Chris@43 311 m_noteOffs.clear();
Chris@43 312 }
Chris@43 313
Chris@43 314 void
Chris@366 315 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 316 {
Chris@43 317 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 318
Chris@233 319 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 320
Chris@43 321 QMutexLocker locker(&m_mutex);
Chris@43 322 m_targetChannelCount = targetChannelCount;
Chris@43 323
Chris@308 324 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@308 325 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 326 }
Chris@43 327 }
Chris@43 328
Chris@436 329 sv_frame_t
Chris@43 330 AudioGenerator::getBlockSize() const
Chris@43 331 {
Chris@305 332 return m_processingBlockSize;
Chris@43 333 }
Chris@43 334
Chris@43 335 void
Chris@43 336 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 337 {
Chris@43 338 QMutexLocker locker(&m_mutex);
Chris@43 339
Chris@43 340 m_soloModelSet = s;
Chris@43 341 m_soloing = true;
Chris@43 342 }
Chris@43 343
Chris@43 344 void
Chris@43 345 AudioGenerator::clearSoloModelSet()
Chris@43 346 {
Chris@43 347 QMutexLocker locker(&m_mutex);
Chris@43 348
Chris@43 349 m_soloModelSet.clear();
Chris@43 350 m_soloing = false;
Chris@43 351 }
Chris@43 352
Chris@436 353 sv_frame_t
Chris@436 354 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
Chris@436 355 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 356 {
Chris@43 357 if (m_sourceSampleRate == 0) {
Chris@293 358 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@43 359 return frameCount;
Chris@43 360 }
Chris@43 361
Chris@43 362 QMutexLocker locker(&m_mutex);
Chris@43 363
Chris@108 364 Playable *playable = model;
Chris@108 365 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 366
Chris@43 367 PlayParameters *parameters =
Chris@108 368 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 369 if (!parameters) return frameCount;
Chris@43 370
Chris@43 371 bool playing = !parameters->isPlayMuted();
Chris@43 372 if (!playing) {
Chris@43 373 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 374 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 375 #endif
Chris@43 376 return frameCount;
Chris@43 377 }
Chris@43 378
Chris@43 379 if (m_soloing) {
Chris@43 380 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 381 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 382 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 383 #endif
Chris@43 384 return frameCount;
Chris@43 385 }
Chris@43 386 }
Chris@43 387
Chris@43 388 float gain = parameters->getPlayGain();
Chris@43 389 float pan = parameters->getPlayPan();
Chris@43 390
Chris@43 391 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 392 if (dtvm) {
Chris@43 393 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@43 394 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 395 }
Chris@43 396
Chris@313 397 if (usesClipMixer(model)) {
Chris@313 398 return mixClipModel(model, startFrame, frameCount,
Chris@313 399 buffer, gain, pan);
Chris@313 400 }
Chris@43 401
Chris@313 402 if (usesContinuousSynth(model)) {
Chris@313 403 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 404 buffer, gain, pan);
Chris@43 405 }
Chris@43 406
Chris@276 407 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 408
Chris@43 409 return frameCount;
Chris@43 410 }
Chris@43 411
Chris@436 412 sv_frame_t
Chris@43 413 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@436 414 sv_frame_t startFrame, sv_frame_t frames,
Chris@43 415 float **buffer, float gain, float pan,
Chris@436 416 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 417 {
Chris@436 418 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
Chris@43 419
Chris@366 420 int modelChannels = dtvm->getChannelCount();
Chris@80 421
Chris@382 422 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
Chris@80 423
Chris@382 424 for (int c = 0; c < m_channelBufCount; ++c) {
Chris@382 425 delete[] m_channelBuffer[c];
Chris@80 426 }
Chris@80 427
Chris@382 428 delete[] m_channelBuffer;
Chris@382 429 m_channelBuffer = new float *[modelChannels];
Chris@80 430
Chris@366 431 for (int c = 0; c < modelChannels; ++c) {
Chris@382 432 m_channelBuffer[c] = new float[maxFrames];
Chris@80 433 }
Chris@80 434
Chris@382 435 m_channelBufCount = modelChannels;
Chris@382 436 m_channelBufSiz = maxFrames;
Chris@43 437 }
Chris@80 438
Chris@436 439 sv_frame_t got = 0;
Chris@80 440
Chris@80 441 if (startFrame >= fadeIn/2) {
Chris@460 442
Chris@460 443 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 444 startFrame - fadeIn/2,
Chris@460 445 frames + fadeOut/2 + fadeIn/2);
Chris@460 446
Chris@460 447 for (int c = 0; c < modelChannels; ++c) {
Chris@460 448 copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
Chris@460 449 }
Chris@460 450
Chris@461 451 got = data[0].size();
Chris@460 452
Chris@80 453 } else {
Chris@436 454 sv_frame_t missing = fadeIn/2 - startFrame;
Chris@80 455
Chris@382 456 if (missing > 0) {
Chris@382 457 cerr << "note: channelBufSiz = " << m_channelBufSiz
Chris@382 458 << ", frames + fadeOut/2 = " << frames + fadeOut/2
Chris@382 459 << ", startFrame = " << startFrame
Chris@382 460 << ", missing = " << missing << endl;
Chris@80 461 }
Chris@80 462
Chris@460 463 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 464 startFrame,
Chris@460 465 frames + fadeOut/2);
Chris@366 466 for (int c = 0; c < modelChannels; ++c) {
Chris@460 467 copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
Chris@80 468 }
Chris@80 469
Chris@461 470 got = data[0].size() + missing;
Chris@80 471 }
Chris@43 472
Chris@366 473 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@43 474
Chris@366 475 int sourceChannel = (c % modelChannels);
Chris@43 476
Chris@233 477 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 478
Chris@43 479 float channelGain = gain;
Chris@43 480 if (pan != 0.0) {
Chris@43 481 if (c == 0) {
Chris@436 482 if (pan > 0.0) channelGain *= 1.0f - pan;
Chris@43 483 } else {
Chris@436 484 if (pan < 0.0) channelGain *= pan + 1.0f;
Chris@43 485 }
Chris@43 486 }
Chris@43 487
Chris@436 488 for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
Chris@43 489 float *back = buffer[c];
Chris@43 490 back -= fadeIn/2;
Chris@436 491 back[i] +=
Chris@436 492 (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
Chris@436 493 / float(fadeIn);
Chris@43 494 }
Chris@43 495
Chris@436 496 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@43 497 float mult = channelGain;
Chris@43 498 if (i < fadeIn/2) {
Chris@436 499 mult = (mult * float(i)) / float(fadeIn);
Chris@43 500 }
Chris@43 501 if (i > frames - fadeOut/2) {
Chris@436 502 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
Chris@43 503 }
Chris@382 504 float val = m_channelBuffer[sourceChannel][i];
Chris@80 505 if (i >= got) val = 0.f;
Chris@80 506 buffer[c][i] += mult * val;
Chris@43 507 }
Chris@43 508 }
Chris@43 509
Chris@43 510 return got;
Chris@43 511 }
Chris@43 512
Chris@436 513 sv_frame_t
Chris@313 514 AudioGenerator::mixClipModel(Model *model,
Chris@436 515 sv_frame_t startFrame, sv_frame_t frames,
Chris@313 516 float **buffer, float gain, float pan)
Chris@43 517 {
Chris@308 518 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 519 if (!clipMixer) return 0;
Chris@43 520
Chris@436 521 int blocks = int(frames / m_processingBlockSize);
Chris@43 522
Chris@313 523 //!!! todo: the below -- it matters
Chris@313 524
Chris@43 525 //!!! hang on -- the fact that the audio callback play source's
Chris@43 526 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 527 //that we always get called for a multiple of it here (because it
Chris@43 528 //also depends on the JACK block size). how should we ensure that
Chris@43 529 //all models write the same amount in to the mix, and that we
Chris@43 530 //always have a multiple of the plugin buffer size? I guess this
Chris@43 531 //class has to be queryable for the plugin buffer size & the
Chris@43 532 //callback play source has to use that as a multiple for all the
Chris@43 533 //calls to mixModel
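    // One possible answer (an illustrative sketch only, not something this
    // file implements): since getBlockSize() already exposes
    // m_processingBlockSize, a caller holding a pointer to this
    // AudioGenerator could round each request down to a whole number of
    // processing blocks and carry the remainder into its next callback,
    // e.g. (where "generator" and "requested" stand for the caller's own
    // variables):
    //
    //     sv_frame_t block = generator->getBlockSize();
    //     sv_frame_t usable = (requested / block) * block;
    //
    // so that every playable model is asked to mix the same whole-block
    // amount on every call.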
Chris@43 534
Chris@436 535 sv_frame_t got = blocks * m_processingBlockSize;
Chris@43 536
Chris@43 537 #ifdef DEBUG_AUDIO_GENERATOR
Chris@442 538 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
Chris@442 539 << ", blocks " << blocks << ", have " << m_noteOffs.size()
Chris@442 540 << " note-offs" << endl;
Chris@43 541 #endif
Chris@43 542
Chris@308 543 ClipMixer::NoteStart on;
Chris@308 544 ClipMixer::NoteEnd off;
Chris@43 545
Chris@275 546 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 547
Chris@308 548 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 549
Chris@366 550 for (int i = 0; i < blocks; ++i) {
Chris@43 551
Chris@436 552 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 553
Chris@299 554 NoteList notes;
Chris@299 555 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 556 if (exportable) {
Chris@366 557 notes = exportable->getNotesWithin(reqStart,
Chris@366 558 reqStart + m_processingBlockSize);
Chris@299 559 }
Chris@43 560
Chris@308 561 std::vector<ClipMixer::NoteStart> starts;
Chris@308 562 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 563
Chris@275 564 for (NoteList::const_iterator ni = notes.begin();
Chris@275 565 ni != notes.end(); ++ni) {
Chris@43 566
Chris@436 567 sv_frame_t noteFrame = ni->start;
Chris@43 568
Chris@275 569 if (noteFrame < reqStart ||
Chris@305 570 noteFrame >= reqStart + m_processingBlockSize) continue;
Chris@43 571
Chris@43 572 while (noteOffs.begin() != noteOffs.end() &&
Chris@275 573 noteOffs.begin()->frame <= noteFrame) {
Chris@43 574
Chris@436 575 sv_frame_t eventFrame = noteOffs.begin()->frame;
Chris@308 576 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 577
Chris@308 578 off.frameOffset = eventFrame - reqStart;
Chris@308 579 off.frequency = noteOffs.begin()->frequency;
Chris@43 580
Chris@43 581 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 582 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 583 #endif
Chris@43 584
Chris@308 585 ends.push_back(off);
Chris@43 586 noteOffs.erase(noteOffs.begin());
Chris@43 587 }
Chris@43 588
Chris@308 589 on.frameOffset = noteFrame - reqStart;
Chris@308 590 on.frequency = ni->getFrequency();
Chris@436 591 on.level = float(ni->velocity) / 127.0f;
Chris@308 592 on.pan = pan;
Chris@43 593
Chris@43 594 #ifdef DEBUG_AUDIO_GENERATOR
Chris@346 595 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
Chris@43 596 #endif
Chris@43 597
Chris@308 598 starts.push_back(on);
Chris@275 599 noteOffs.insert
Chris@308 600 (NoteOff(on.frequency, noteFrame + ni->duration));
Chris@43 601 }
Chris@43 602
Chris@43 603 while (noteOffs.begin() != noteOffs.end() &&
Chris@308 604 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
Chris@43 605
Chris@436 606 sv_frame_t eventFrame = noteOffs.begin()->frame;
Chris@308 607 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 608
Chris@308 609 off.frameOffset = eventFrame - reqStart;
Chris@308 610 off.frequency = noteOffs.begin()->frequency;
Chris@43 611
Chris@43 612 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 613 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 614 #endif
Chris@43 615
Chris@308 616 ends.push_back(off);
Chris@308 617 noteOffs.erase(noteOffs.begin());
Chris@43 618 }
Chris@43 619
Chris@366 620 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@308 621 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 622 }
Chris@43 623
Chris@308 624 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 625 }
Chris@43 626
Chris@308 627 delete[] bufferIndexes;
Chris@43 628
Chris@43 629 return got;
Chris@43 630 }
Chris@313 631
Chris@436 632 sv_frame_t
Chris@313 633 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@436 634 sv_frame_t startFrame,
Chris@436 635 sv_frame_t frames,
Chris@313 636 float **buffer,
Chris@313 637 float gain,
Chris@313 638 float pan)
Chris@313 639 {
Chris@313 640 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 641 if (!synth) return 0;
Chris@313 642
Chris@313 643 // only type we support here at the moment
Chris@313 644 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 645 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 646
Chris@436 647 int blocks = int(frames / m_processingBlockSize);
Chris@313 648
Chris@313 649 //!!! todo: see comment in mixClipModel
Chris@313 650
Chris@436 651 sv_frame_t got = blocks * m_processingBlockSize;
Chris@313 652
Chris@313 653 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 654 cout << "mixModel [synth]: frames " << frames
Chris@313 655 << ", blocks " << blocks << endl;
Chris@313 656 #endif
Chris@313 657
Chris@313 658 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 659
Chris@366 660 for (int i = 0; i < blocks; ++i) {
Chris@313 661
Chris@436 662 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 663
Chris@366 664 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 665 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 666 }
Chris@313 667
Chris@313 668 SparseTimeValueModel::PointList points =
Chris@313 669 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 670
Chris@313 671 // by default, repeat last frequency
Chris@313 672 float f0 = 0.f;
Chris@313 673
Chris@313 674 // go straight to the last freq that is genuinely in this range
Chris@313 675 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 676 itr != points.begin(); ) {
Chris@313 677 --itr;
Chris@313 678 if (itr->frame >= reqStart &&
Chris@313 679 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 680 f0 = itr->value;
Chris@313 681 break;
Chris@313 682 }
Chris@313 683 }
Chris@313 684
Chris@314 685 // if we found no such frequency and the next point is further
Chris@314 686 // away than twice the model resolution, go silent (same
Chris@314 687 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 688 // segment)
Chris@314 689 if (f0 == 0.f) {
Chris@314 690 SparseTimeValueModel::PointList nextPoints =
Chris@314 691 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 692 if (nextPoints.empty() ||
Chris@314 693 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 694 f0 = -1.f;
Chris@314 695 }
Chris@314 696 }
Chris@314 697
Chris@315 698 // cerr << "f0 = " << f0 << endl;
Chris@313 699
Chris@313 700 synth->mix(bufferIndexes,
Chris@313 701 gain,
Chris@313 702 pan,
Chris@313 703 f0);
Chris@313 704 }
Chris@313 705
Chris@313 706 delete[] bufferIndexes;
Chris@313 707
Chris@313 708 return got;
Chris@313 709 }
Chris@313 710