annotate audio/AudioGenerator.cpp @ 729:15da3ab3d416 csv-export-dialog

Split export functions into file-type-specific ones; accept various CSV-specific arguments
author Chris Cannam
date Tue, 14 Jan 2020 15:42:46 +0000
parents 3c5dc95bea91
children
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@43 25 #include "data/model/DenseTimeValueModel.h"
Chris@313 26 #include "data/model/SparseTimeValueModel.h"
Chris@43 27 #include "data/model/SparseOneDimensionalModel.h"
Chris@645 28 #include "base/NoteData.h"
Chris@43 29
Chris@307 30 #include "ClipMixer.h"
Chris@313 31 #include "ContinuousSynth.h"
Chris@307 32
Chris@43 33 #include <iostream>
Chris@167 34 #include <cmath>
Chris@43 35
Chris@43 36 #include <QDir>
Chris@43 37 #include <QFile>
Chris@43 38
// Fixed number of frames rendered per processing block by the clip
// mixers and continuous synths (see mixClipModel /
// mixContinuousSynthModel, which iterate in multiples of this).
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

// Directory the bundled sample clips are copied into; set once by
// initialiseSampleDir(), empty if that failed or hasn't run yet.
QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1
Chris@43 46
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),     // unknown until the first model is added
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(nullptr),  // scratch buffers allocated lazily in mixDenseTimeValueModel
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    // Ensure the bundled clip samples are available on disk before any
    // clip mixer tries to load them
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever a playable's clip id is
    // changed in the play-parameter repository
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(int, QString)),
            this,
            SLOT(playClipIdChanged(int, QString)));
}
Chris@43 63
Chris@43 64 AudioGenerator::~AudioGenerator()
Chris@43 65 {
Chris@177 66 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 67 cerr << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 68 #endif
Chris@593 69
Chris@593 70 for (int i = 0; i < m_channelBufCount; ++i) {
Chris@593 71 delete[] m_channelBuffer[i];
Chris@593 72 }
Chris@593 73 delete[] m_channelBuffer;
Chris@43 74 }
Chris@43 75
// Copy the sample clips bundled as Qt resources (":/samples/*.wav")
// into a writable temporary directory, recording its path in
// m_sampleDir. Idempotent: returns immediately if already done. On
// failure m_sampleDir is left empty and clip playback will be
// unavailable (makeClipMixerFor will fail to load clips).
void
AudioGenerator::initialiseSampleDir()
{
    if (m_sampleDir != "") return;  // already initialised

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (const DirectoryCreationFailed &f) {
        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
             << " Failed to create temporary sample directory"
             << endl;
        m_sampleDir = "";
        return;
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));
        QString target = QDir(m_sampleDir).filePath(fileName);

        if (!file.copy(target)) {
            // Non-fatal: the remaining clips are still copied
            cerr << "WARNING: AudioGenerator::getSampleDir: "
                 << "Unable to copy " << fileName
                 << " into temporary directory \""
                 << m_sampleDir << "\"" << endl;
        } else {
            // QFile::copy preserves the resource's read-only
            // permissions; make the copy writable so it can be
            // cleaned up later
            QFile tf(target);
            tf.setPermissions(tf.permissions() |
                              QFile::WriteOwner |
                              QFile::WriteUser);
        }
    }
}
Chris@43 112
// Register a model for playback. Returns true if the model will
// actually be heard (playable, not muted, and we have a mechanism for
// it); false otherwise. Side effects: may set m_sourceSampleRate and
// may create a clip mixer or continuous synth for the model.
bool
AudioGenerator::addModel(ModelId modelId)
{
    auto model = ModelById::get(modelId);
    if (!model) return false;
    if (!model->canPlay()) return false;

    if (m_sourceSampleRate == 0) {

        // First playable model added determines our source rate
        m_sourceSampleRate = model->getSampleRate();

    } else {

        // A dense (audio) model overrides whatever rate we picked up
        // from earlier sparse models, and needs no mixer/synth
        auto dtvm = std::dynamic_pointer_cast<DenseTimeValueModel>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    auto parameters =
        PlayParameterRepository::getInstance()->getPlayParameters
        (modelId.untyped);

    if (!parameters) {
        SVCERR << "WARNING: Model with canPlay true is not known to PlayParameterRepository" << endl;
        return false;
    }

    bool willPlay = !parameters->isPlayMuted();

    // Sparse models need either a clip mixer (note-like events)...
    if (usesClipMixer(modelId)) {
        ClipMixer *mixer = makeClipMixerFor(modelId);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[modelId] = mixer;
            return willPlay;
        }
    }

    // ...or a continuous synth (pitch-track-like curves)
    if (usesContinuousSynth(modelId)) {
        ContinuousSynth *synth = makeSynthFor(modelId);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[modelId] = synth;
            return willPlay;
        }
    }

    return false;
}
Chris@43 165
Chris@43 166 void
Chris@682 167 AudioGenerator::playClipIdChanged(int playableId, QString)
Chris@43 168 {
Chris@682 169 ModelId modelId;
Chris@682 170 modelId.untyped = playableId;
Chris@682 171
Chris@682 172 if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
Chris@616 173 return;
Chris@616 174 }
Chris@307 175
Chris@682 176 ClipMixer *mixer = makeClipMixerFor(modelId);
Chris@307 177 if (mixer) {
Chris@43 178 QMutexLocker locker(&m_mutex);
Chris@682 179 ClipMixer *oldMixer = m_clipMixerMap[modelId];
Chris@682 180 m_clipMixerMap[modelId] = mixer;
Chris@682 181 delete oldMixer;
Chris@43 182 }
Chris@43 183 }
Chris@308 184
Chris@313 185 bool
Chris@682 186 AudioGenerator::usesClipMixer(ModelId modelId)
Chris@43 187 {
Chris@313 188 bool clip =
Chris@682 189 (ModelById::isa<SparseOneDimensionalModel>(modelId) ||
Chris@682 190 ModelById::isa<NoteModel>(modelId));
Chris@313 191 return clip;
Chris@43 192 }
Chris@43 193
Chris@313 194 bool
Chris@682 195 AudioGenerator::wantsQuieterClips(ModelId modelId)
Chris@349 196 {
Chris@349 197 // basically, anything that usually has sustain (like notes) or
Chris@349 198 // often has multiple sounds at once (like notes) wants to use a
Chris@349 199 // quieter level than simple click tracks
Chris@682 200 bool does = (ModelById::isa<NoteModel>(modelId));
Chris@349 201 return does;
Chris@349 202 }
Chris@349 203
Chris@349 204 bool
Chris@682 205 AudioGenerator::usesContinuousSynth(ModelId modelId)
Chris@43 206 {
Chris@682 207 bool cont = (ModelById::isa<SparseTimeValueModel>(modelId));
Chris@313 208 return cont;
Chris@313 209 }
Chris@313 210
Chris@307 211 ClipMixer *
Chris@682 212 AudioGenerator::makeClipMixerFor(ModelId modelId)
Chris@43 213 {
Chris@309 214 QString clipId;
Chris@43 215
Chris@686 216 auto parameters =
Chris@682 217 PlayParameterRepository::getInstance()->getPlayParameters
Chris@682 218 (modelId.untyped);
Chris@43 219 if (parameters) {
Chris@309 220 clipId = parameters->getPlayClipId();
Chris@43 221 }
Chris@43 222
Chris@445 223 #ifdef DEBUG_AUDIO_GENERATOR
Chris@682 224 std::cerr << "AudioGenerator::makeClipMixerFor(" << modelId << "): sample id = " << clipId << std::endl;
Chris@445 225 #endif
Chris@276 226
Chris@309 227 if (clipId == "") {
Chris@682 228 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << modelId << "): no sample, skipping" << endl;
Chris@636 229 return nullptr;
Chris@276 230 }
Chris@43 231
Chris@308 232 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 233 m_sourceSampleRate,
Chris@308 234 m_processingBlockSize);
Chris@307 235
Chris@436 236 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
Chris@307 237
Chris@309 238 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 239
Chris@682 240 double level = wantsQuieterClips(modelId) ? 0.5 : 1.0;
Chris@349 241 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 242 delete mixer;
Chris@636 243 return nullptr;
Chris@43 244 }
Chris@43 245
Chris@445 246 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 247 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 248 #endif
Chris@43 249
Chris@308 250 return mixer;
Chris@308 251 }
Chris@43 252
Chris@313 253 ContinuousSynth *
Chris@682 254 AudioGenerator::makeSynthFor(ModelId)
Chris@313 255 {
Chris@313 256 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 257 m_sourceSampleRate,
rmb456@323 258 m_processingBlockSize,
rmb456@323 259 m_waveType);
Chris@313 260
Chris@445 261 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 262 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 263 #endif
Chris@313 264
Chris@313 265 return synth;
Chris@313 266 }
Chris@313 267
Chris@43 268 void
Chris@682 269 AudioGenerator::removeModel(ModelId modelId)
Chris@43 270 {
Chris@43 271 QMutexLocker locker(&m_mutex);
Chris@43 272
Chris@682 273 if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
Chris@616 274 return;
Chris@616 275 }
Chris@43 276
Chris@682 277 ClipMixer *mixer = m_clipMixerMap[modelId];
Chris@682 278 m_clipMixerMap.erase(modelId);
Chris@308 279 delete mixer;
Chris@43 280 }
Chris@43 281
Chris@43 282 void
Chris@43 283 AudioGenerator::clearModels()
Chris@43 284 {
Chris@43 285 QMutexLocker locker(&m_mutex);
Chris@308 286
Chris@308 287 while (!m_clipMixerMap.empty()) {
Chris@308 288 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@595 289 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@595 290 delete mixer;
Chris@43 291 }
Chris@43 292 }
Chris@43 293
Chris@43 294 void
Chris@43 295 AudioGenerator::reset()
Chris@43 296 {
Chris@43 297 QMutexLocker locker(&m_mutex);
Chris@308 298
Chris@445 299 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 300 cerr << "AudioGenerator::reset()" << endl;
Chris@445 301 #endif
Chris@397 302
Chris@616 303 for (ClipMixerMap::iterator i = m_clipMixerMap.begin();
Chris@616 304 i != m_clipMixerMap.end(); ++i) {
Chris@595 305 if (i->second) {
Chris@595 306 i->second->reset();
Chris@595 307 }
Chris@43 308 }
Chris@43 309
Chris@43 310 m_noteOffs.clear();
Chris@43 311 }
Chris@43 312
Chris@43 313 void
Chris@366 314 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 315 {
Chris@43 316 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 317
Chris@233 318 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 319
Chris@43 320 QMutexLocker locker(&m_mutex);
Chris@43 321 m_targetChannelCount = targetChannelCount;
Chris@43 322
Chris@308 323 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 324 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 325 }
Chris@43 326 }
Chris@43 327
Chris@436 328 sv_frame_t
Chris@43 329 AudioGenerator::getBlockSize() const
Chris@43 330 {
Chris@305 331 return m_processingBlockSize;
Chris@43 332 }
Chris@43 333
Chris@43 334 void
Chris@682 335 AudioGenerator::setSoloModelSet(std::set<ModelId> s)
Chris@43 336 {
Chris@43 337 QMutexLocker locker(&m_mutex);
Chris@43 338
Chris@43 339 m_soloModelSet = s;
Chris@43 340 m_soloing = true;
Chris@43 341 }
Chris@43 342
Chris@43 343 void
Chris@43 344 AudioGenerator::clearSoloModelSet()
Chris@43 345 {
Chris@43 346 QMutexLocker locker(&m_mutex);
Chris@43 347
Chris@43 348 m_soloModelSet.clear();
Chris@43 349 m_soloing = false;
Chris@43 350 }
Chris@43 351
// Mix frameCount frames of the given model, starting at startFrame,
// into the caller's buffers (one per target channel), honouring mute,
// solo, gain and pan from the model's play parameters. Dispatches to
// the appropriate renderer for the model type. Returns the number of
// frames actually produced; returns frameCount unchanged when the
// model is skipped (muted, solo'd out, unplayable, or unknown type).
sv_frame_t
AudioGenerator::mixModel(ModelId modelId,
                         sv_frame_t startFrame, sv_frame_t frameCount,
                         float **buffer,
                         sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        // No model has ever been added, so we have no rate to render at
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    auto model = ModelById::get(modelId);
    if (!model || !model->canPlay()) return frameCount;

    auto parameters =
        PlayParameterRepository::getInstance()->getPlayParameters
        (modelId.untyped);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << modelId << "): muted" << endl;
#endif
        return frameCount;
    }

    // In solo mode, only the solo'd set is audible
    if (m_soloing) {
        if (m_soloModelSet.find(modelId) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << modelId << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dense audio data: read samples directly (fades apply here only)
    if (std::dynamic_pointer_cast<DenseTimeValueModel>(model)) {
        return mixDenseTimeValueModel(modelId, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    // Event-based data: trigger sample clips
    if (usesClipMixer(modelId)) {
        return mixClipModel(modelId, startFrame, frameCount,
                            buffer, gain, pan);
    }

    // Curve data: drive a continuous oscillator
    if (usesContinuousSynth(modelId)) {
        return mixContinuousSynthModel(modelId, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << modelId << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
Chris@43 412
// Mix audio sample data from a dense model into the output buffers,
// applying gain, constant-power-ish pan (linear attenuation of the
// opposite channel), and linear fade-in/fade-out ramps centred on the
// segment boundaries. Returns the number of frames actually obtained
// from the model (may be less than requested near the end of the
// model; the remainder is treated as silence).
sv_frame_t
AudioGenerator::mixDenseTimeValueModel(ModelId modelId,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    // Worst-case scratch space needed including fade overlap
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    auto dtvm = ModelById::getAs<DenseTimeValueModel>(modelId);
    if (!dtvm) return 0;

    int modelChannels = dtvm->getChannelCount();

    // Grow (never shrink) the reusable per-channel scratch buffers
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {

        // Normal case: read a window extended by half a fade at each
        // end, so the ramps have source data to work on
        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame - fadeIn/2,
                                              frames + fadeOut/2 + fadeIn/2);

        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
        }

        got = data[0].size();

    } else {
        // Too close to frame 0 to back up by fadeIn/2: shift the data
        // right by the missing amount within the scratch buffer.
        // NOTE(review): the leading `missing` samples of the scratch
        // buffer are left uninitialised here, but the fade-in loop
        // below reads from index 0 — presumably inaudible in practice;
        // confirm before relying on it.
        sv_frame_t missing = fadeIn/2 - startFrame;

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame,
                                              frames + fadeOut/2);
        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
        }

        got = data[0].size() + missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        // If the model has fewer channels than the target, cycle them
        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        // Linear pan: attenuate left for pan > 0, right for pan < 0
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

        // First half of the fade-in is written BEFORE buffer[c] —
        // assumes the caller guarantees fadeIn/2 writable frames
        // before the buffer start (TODO confirm against callers)
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        // Main body plus the trailing half of the fade-out, with both
        // ramps applied where they overlap the block
        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;  // past end of model: silence
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
Chris@43 516
// Mix an event-based (instants/notes) model by triggering sample
// clips. Iterates in whole m_processingBlockSize blocks, collecting
// note-on events from the model and scheduling the matching note-offs
// via the per-model m_noteOffs set (which persists across calls, so
// notes can end in a later block). Returns blocks *
// m_processingBlockSize — any trailing partial block is not rendered.
sv_frame_t
AudioGenerator::mixClipModel(ModelId modelId,
                             sv_frame_t startFrame, sv_frame_t frames,
                             float **buffer, float gain, float pan)
{
    // NB operator[] default-inserts a null entry if the model is
    // unknown; we bail out on null either way
    ClipMixer *clipMixer = m_clipMixerMap[modelId];
    if (!clipMixer) return 0;

    auto exportable = ModelById::getAs<NoteExportable>(modelId);

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
         << ", blocks " << blocks << ", have " << m_noteOffs.size()
         << " note-offs" << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    // Pending note-offs for this model, carried over between calls
    NoteOffSet &noteOffs = m_noteOffs[modelId];

    float **bufferIndexes = new float *[m_targetChannelCount];

    //!!! + for first block, prime with notes already active

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        if (exportable) {
            notes = exportable->getNotesStartingWithin(reqStart,
                                                       m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        // A pending note-off whose note-ON lies in our future means
        // playback has jumped backwards; flush such offs immediately
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->onFrame > reqStart) {

            // We must have jumped back in time, as there is a
            // note-off pending for a note that hasn't begun yet. Emit
            // the note-off now and discard

            off.frameOffset = 0;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding rewind-caused note-off at frame offset 0 frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            sv_frame_t noteFrame = ni->start;
            sv_frame_t noteDuration = ni->duration;

            // Only notes that actually start within this block
            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) {
                continue;
            }

            if (noteDuration == 0) {
                // If we have a note-off and a note-on with the same
                // time, then the note-off will be assumed (in the
                // logic below that deals with two-point note-on/off
                // events) to be switching off an earlier note before
                // this one begins -- that's necessary in order to
                // support adjoining notes of equal pitch. But it does
                // mean we have to explicitly ignore zero-duration
                // notes, otherwise they'll be played without end
#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
#endif
                continue;
            }

            // Emit any pending note-offs due at or before this
            // note-on (supports adjoining equal-pitch notes)
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->offFrame <= noteFrame) {

                sv_frame_t eventFrame = noteOffs.begin()->offFrame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0f;  // MIDI velocity -> 0..1
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            // Schedule this note's end for a later block (or below)
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + noteDuration, noteFrame));
        }

        // Emit remaining note-offs that fall within this block
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->offFrame <=
               reqStart + m_processingBlockSize) {

            sv_frame_t eventFrame = noteOffs.begin()->offFrame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        // Point the mixer at this block's slice of each channel
        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
Chris@313 675
// Mix a sparse time-value model (currently only models whose units
// are "Hz", i.e. pitch tracks) through a continuous oscillator. For
// each processing block the synth is driven with the last frequency
// value occurring in that block: f0 > 0 sets the frequency, f0 == 0
// holds the previous one, and f0 == -1 requests silence. Returns
// blocks * m_processingBlockSize.
sv_frame_t
AudioGenerator::mixContinuousSynthModel(ModelId modelId,
                                        sv_frame_t startFrame,
                                        sv_frame_t frames,
                                        float **buffer,
                                        float gain,
                                        float pan)
{
    // NB operator[] default-inserts a null entry for unknown models
    ContinuousSynth *synth = m_continuousSynthMap[modelId];
    if (!synth) return 0;

    // only type we support here at the moment
    auto stvm = ModelById::getAs<SparseTimeValueModel>(modelId);
    if (!stvm) return 0;
    if (stvm->getScaleUnits() != "Hz") return 0;

    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: see comment in mixClipModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [synth]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        // Point the synth at this block's slice of each channel
        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        EventVector points =
            stvm->getEventsStartingWithin(reqStart, m_processingBlockSize);

        // by default, repeat last frequency
        float f0 = 0.f;

        // go straight to the last freq in this range
        if (!points.empty()) {
            f0 = points.rbegin()->getValue();
        }

        // if there is no such frequency and the next point is further
        // away than twice the model resolution, go silent (same
        // criterion TimeValueLayer uses for ending a discrete curve
        // segment)
        if (f0 == 0.f) {
            Event nextP;
            if (!stvm->getNearestEventMatching(reqStart + m_processingBlockSize,
                                               [](Event) { return true; },
                                               EventSeries::Forward,
                                               nextP) ||
                nextP.getFrame() > reqStart + 2 * stvm->getResolution()) {
                f0 = -1.f;  // sentinel: tell the synth to fall silent
            }
        }

        // cerr << "f0 = " << f0 << endl;

        synth->mix(bufferIndexes,
                   gain,
                   pan,
                   f0);
    }

    delete[] bufferIndexes;

    return got;
}
Chris@313 751