annotate audio/AudioGenerator.cpp @ 615:755fc02a1565

Associate a note-on time with each pending note-off as well, so that we can check whether a rewind action (or looping) has caused us to jump back to before the note began. Also improve the implementation of the note-off structure comparator.
author Chris Cannam
date Mon, 13 Aug 2018 14:13:38 +0100
parents 88f9266e0417
children 7d3a6357ce64
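
The note-off bookkeeping that this change extends lives in the NoteOff structure and NoteOffSet container, presumably declared in AudioGenerator.h (not shown here). The following is a rough sketch of what that declaration might look like after this revision, inferred from the usage in mixClipModel() below -- the NoteOff(frequency, offFrame, onFrame) constructor call, the frequency/offFrame/onFrame members, and the fact that noteOffs.begin() is treated as the earliest pending note-off -- rather than copied from the header:

    // Sketch only: names and the container type are assumptions based on
    // how this .cpp file uses them, not the actual header contents.
    struct NoteOff {

        NoteOff(float _freq, sv_frame_t _offFrame, sv_frame_t _onFrame) :
            frequency(_freq), offFrame(_offFrame), onFrame(_onFrame) { }

        float frequency;
        sv_frame_t offFrame;  // frame at which the note should stop
        sv_frame_t onFrame;   // frame at which the note began

        struct Comparator {
            bool operator()(const NoteOff &n1, const NoteOff &n2) const {
                // Order primarily by off time, so that begin() of the
                // set is always the next note-off due to be emitted;
                // compare the remaining fields as tie-breakers to give
                // a strict weak ordering over distinct events.
                if (n1.offFrame != n2.offFrame) return n1.offFrame < n2.offFrame;
                if (n1.onFrame != n2.onFrame) return n1.onFrame < n2.onFrame;
                return n1.frequency < n2.frequency;
            }
        };
    };

    typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet;

Storing the on time alongside the off time is what lets mixClipModel() flush any pending note-off whose onFrame is later than the current request start: that situation can only arise when playback has been rewound, or has looped, to a point before the note began.
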
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
Chris@436 40 const sv_frame_t
Chris@315 41 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
Chris@348 51 m_waveType(0),
Chris@382 52 m_soloing(false),
Chris@382 53 m_channelBuffer(0),
Chris@382 54 m_channelBufSiz(0),
Chris@382 55 m_channelBufCount(0)
Chris@43 56 {
Chris@108 57 initialiseSampleDir();
Chris@43 58
Chris@43 59 connect(PlayParameterRepository::getInstance(),
Chris@309 60 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 61 this,
Chris@309 62 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 63 }
Chris@43 64
Chris@43 65 AudioGenerator::~AudioGenerator()
Chris@43 66 {
Chris@177 67 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 68 cerr << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 69 #endif
Chris@593 70
Chris@593 71 for (int i = 0; i < m_channelBufCount; ++i) {
Chris@593 72 delete[] m_channelBuffer[i];
Chris@593 73 }
Chris@593 74 delete[] m_channelBuffer;
Chris@43 75 }
Chris@43 76
Chris@108 77 void
Chris@108 78 AudioGenerator::initialiseSampleDir()
Chris@43 79 {
Chris@108 80 if (m_sampleDir != "") return;
Chris@108 81
Chris@108 82 try {
Chris@108 83 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@598 84 } catch (const DirectoryCreationFailed &f) {
Chris@293 85 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 86 << " Failed to create temporary sample directory"
Chris@293 87 << endl;
Chris@108 88 m_sampleDir = "";
Chris@108 89 return;
Chris@108 90 }
Chris@108 91
Chris@108 92 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 93
Chris@108 94 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 95
Chris@108 96 QString fileName(sampleResourceDir[i]);
Chris@108 97 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 98 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 99
Chris@151 100 if (!file.copy(target)) {
Chris@293 101 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 102 << "Unable to copy " << fileName
Chris@108 103 << " into temporary directory \""
Chris@293 104 << m_sampleDir << "\"" << endl;
Chris@151 105 } else {
Chris@151 106 QFile tf(target);
Chris@151 107 tf.setPermissions(tf.permissions() |
Chris@151 108 QFile::WriteOwner |
Chris@151 109 QFile::WriteUser);
Chris@108 110 }
Chris@43 111 }
Chris@43 112 }
Chris@43 113
Chris@43 114 bool
Chris@43 115 AudioGenerator::addModel(Model *model)
Chris@43 116 {
Chris@43 117 if (m_sourceSampleRate == 0) {
Chris@43 118
Chris@595 119 m_sourceSampleRate = model->getSampleRate();
Chris@43 120
Chris@43 121 } else {
Chris@43 122
Chris@595 123 DenseTimeValueModel *dtvm =
Chris@595 124 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 125
Chris@595 126 if (dtvm) {
Chris@595 127 m_sourceSampleRate = model->getSampleRate();
Chris@595 128 return true;
Chris@595 129 }
Chris@43 130 }
Chris@307 131
Chris@418 132 const Playable *playable = model;
Chris@418 133 if (!playable || !playable->canPlay()) return 0;
Chris@418 134
Chris@418 135 PlayParameters *parameters =
Chris@595 136 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@418 137
Chris@418 138 bool willPlay = !parameters->isPlayMuted();
Chris@418 139
Chris@313 140 if (usesClipMixer(model)) {
Chris@313 141 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 142 if (mixer) {
Chris@313 143 QMutexLocker locker(&m_mutex);
Chris@313 144 m_clipMixerMap[model] = mixer;
Chris@418 145 return willPlay;
Chris@313 146 }
Chris@313 147 }
Chris@313 148
Chris@313 149 if (usesContinuousSynth(model)) {
Chris@313 150 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 151 if (synth) {
Chris@313 152 QMutexLocker locker(&m_mutex);
Chris@313 153 m_continuousSynthMap[model] = synth;
Chris@418 154 return willPlay;
Chris@313 155 }
Chris@43 156 }
Chris@307 157
Chris@43 158 return false;
Chris@43 159 }
Chris@43 160
Chris@43 161 void
Chris@309 162 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 163 {
Chris@108 164 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 165 if (!model) {
Chris@309 166 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 167 << playable << " is not a supported model type"
Chris@293 168 << endl;
Chris@108 169 return;
Chris@108 170 }
Chris@108 171
Chris@307 172 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 173
Chris@307 174 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 175 if (mixer) {
Chris@43 176 QMutexLocker locker(&m_mutex);
Chris@307 177 m_clipMixerMap[model] = mixer;
Chris@43 178 }
Chris@43 179 }
Chris@308 180
Chris@313 181 bool
Chris@313 182 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 183 {
Chris@313 184 bool clip =
Chris@313 185 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 186 qobject_cast<const NoteModel *>(model) ||
Chris@313 187 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 188 return clip;
Chris@43 189 }
Chris@43 190
Chris@313 191 bool
Chris@349 192 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 193 {
Chris@349 194 // basically, anything that usually has sustain (like notes) or
Chris@349 195 // often has multiple sounds at once (also like notes) wants to use a
Chris@349 196 // quieter level than simple click tracks
Chris@349 197 bool does =
Chris@349 198 (qobject_cast<const NoteModel *>(model) ||
Chris@349 199 qobject_cast<const FlexiNoteModel *>(model));
Chris@349 200 return does;
Chris@349 201 }
Chris@349 202
Chris@349 203 bool
Chris@313 204 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 205 {
Chris@313 206 bool cont =
Chris@313 207 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 208 return cont;
Chris@313 209 }
Chris@313 210
Chris@307 211 ClipMixer *
Chris@307 212 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 213 {
Chris@309 214 QString clipId;
Chris@43 215
Chris@108 216 const Playable *playable = model;
Chris@108 217 if (!playable || !playable->canPlay()) return 0;
Chris@108 218
Chris@43 219 PlayParameters *parameters =
Chris@595 220 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 221 if (parameters) {
Chris@309 222 clipId = parameters->getPlayClipId();
Chris@43 223 }
Chris@43 224
Chris@445 225 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 226 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@445 227 #endif
Chris@276 228
Chris@309 229 if (clipId == "") {
Chris@308 230 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 231 return 0;
Chris@276 232 }
Chris@43 233
Chris@308 234 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 235 m_sourceSampleRate,
Chris@308 236 m_processingBlockSize);
Chris@307 237
Chris@436 238 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
Chris@307 239
Chris@309 240 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 241
Chris@436 242 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
Chris@349 243 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 244 delete mixer;
Chris@43 245 return 0;
Chris@43 246 }
Chris@43 247
Chris@445 248 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 249 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 250 #endif
Chris@43 251
Chris@308 252 return mixer;
Chris@308 253 }
Chris@43 254
Chris@313 255 ContinuousSynth *
Chris@313 256 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 257 {
Chris@313 258 const Playable *playable = model;
Chris@313 259 if (!playable || !playable->canPlay()) return 0;
Chris@313 260
Chris@313 261 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 262 m_sourceSampleRate,
rmb456@323 263 m_processingBlockSize,
rmb456@323 264 m_waveType);
Chris@313 265
Chris@445 266 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 267 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 268 #endif
Chris@313 269
Chris@313 270 return synth;
Chris@313 271 }
Chris@313 272
Chris@43 273 void
Chris@43 274 AudioGenerator::removeModel(Model *model)
Chris@43 275 {
Chris@43 276 SparseOneDimensionalModel *sodm =
Chris@595 277 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 278 if (!sodm) return; // nothing to do
Chris@43 279
Chris@43 280 QMutexLocker locker(&m_mutex);
Chris@43 281
Chris@308 282 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
Chris@43 283
Chris@308 284 ClipMixer *mixer = m_clipMixerMap[sodm];
Chris@308 285 m_clipMixerMap.erase(sodm);
Chris@308 286 delete mixer;
Chris@43 287 }
Chris@43 288
Chris@43 289 void
Chris@43 290 AudioGenerator::clearModels()
Chris@43 291 {
Chris@43 292 QMutexLocker locker(&m_mutex);
Chris@308 293
Chris@308 294 while (!m_clipMixerMap.empty()) {
Chris@308 295 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@595 296 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@595 297 delete mixer;
Chris@43 298 }
Chris@43 299 }
Chris@43 300
Chris@43 301 void
Chris@43 302 AudioGenerator::reset()
Chris@43 303 {
Chris@43 304 QMutexLocker locker(&m_mutex);
Chris@308 305
Chris@445 306 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 307 cerr << "AudioGenerator::reset()" << endl;
Chris@445 308 #endif
Chris@397 309
Chris@308 310 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 311 if (i->second) {
Chris@595 312 i->second->reset();
Chris@595 313 }
Chris@43 314 }
Chris@43 315
Chris@43 316 m_noteOffs.clear();
Chris@43 317 }
Chris@43 318
Chris@43 319 void
Chris@366 320 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 321 {
Chris@43 322 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 323
Chris@233 324 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 325
Chris@43 326 QMutexLocker locker(&m_mutex);
Chris@43 327 m_targetChannelCount = targetChannelCount;
Chris@43 328
Chris@308 329 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 330 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 331 }
Chris@43 332 }
Chris@43 333
Chris@436 334 sv_frame_t
Chris@43 335 AudioGenerator::getBlockSize() const
Chris@43 336 {
Chris@305 337 return m_processingBlockSize;
Chris@43 338 }
Chris@43 339
Chris@43 340 void
Chris@43 341 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 342 {
Chris@43 343 QMutexLocker locker(&m_mutex);
Chris@43 344
Chris@43 345 m_soloModelSet = s;
Chris@43 346 m_soloing = true;
Chris@43 347 }
Chris@43 348
Chris@43 349 void
Chris@43 350 AudioGenerator::clearSoloModelSet()
Chris@43 351 {
Chris@43 352 QMutexLocker locker(&m_mutex);
Chris@43 353
Chris@43 354 m_soloModelSet.clear();
Chris@43 355 m_soloing = false;
Chris@43 356 }
Chris@43 357
Chris@436 358 sv_frame_t
Chris@613 359 AudioGenerator::mixModel(Model *model,
Chris@613 360 sv_frame_t startFrame, sv_frame_t frameCount,
Chris@613 361 float **buffer,
Chris@613 362 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 363 {
Chris@43 364 if (m_sourceSampleRate == 0) {
Chris@595 365 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@595 366 return frameCount;
Chris@43 367 }
Chris@43 368
Chris@43 369 QMutexLocker locker(&m_mutex);
Chris@43 370
Chris@108 371 Playable *playable = model;
Chris@108 372 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 373
Chris@43 374 PlayParameters *parameters =
Chris@595 375 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 376 if (!parameters) return frameCount;
Chris@43 377
Chris@43 378 bool playing = !parameters->isPlayMuted();
Chris@43 379 if (!playing) {
Chris@43 380 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 381 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 382 #endif
Chris@43 383 return frameCount;
Chris@43 384 }
Chris@43 385
Chris@43 386 if (m_soloing) {
Chris@43 387 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 388 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 389 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 390 #endif
Chris@43 391 return frameCount;
Chris@43 392 }
Chris@43 393 }
Chris@43 394
Chris@43 395 float gain = parameters->getPlayGain();
Chris@43 396 float pan = parameters->getPlayPan();
Chris@43 397
Chris@43 398 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 399 if (dtvm) {
Chris@595 400 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@595 401 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 402 }
Chris@43 403
Chris@313 404 if (usesClipMixer(model)) {
Chris@313 405 return mixClipModel(model, startFrame, frameCount,
Chris@313 406 buffer, gain, pan);
Chris@313 407 }
Chris@43 408
Chris@313 409 if (usesContinuousSynth(model)) {
Chris@313 410 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 411 buffer, gain, pan);
Chris@43 412 }
Chris@43 413
Chris@276 414 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 415
Chris@43 416 return frameCount;
Chris@43 417 }
Chris@43 418
Chris@436 419 sv_frame_t
Chris@43 420 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@595 421 sv_frame_t startFrame, sv_frame_t frames,
Chris@595 422 float **buffer, float gain, float pan,
Chris@595 423 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 424 {
Chris@436 425 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
Chris@43 426
Chris@366 427 int modelChannels = dtvm->getChannelCount();
Chris@80 428
Chris@382 429 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
Chris@80 430
Chris@382 431 for (int c = 0; c < m_channelBufCount; ++c) {
Chris@382 432 delete[] m_channelBuffer[c];
Chris@80 433 }
Chris@80 434
Chris@595 435 delete[] m_channelBuffer;
Chris@382 436 m_channelBuffer = new float *[modelChannels];
Chris@80 437
Chris@366 438 for (int c = 0; c < modelChannels; ++c) {
Chris@382 439 m_channelBuffer[c] = new float[maxFrames];
Chris@80 440 }
Chris@80 441
Chris@382 442 m_channelBufCount = modelChannels;
Chris@595 443 m_channelBufSiz = maxFrames;
Chris@43 444 }
Chris@80 445
Chris@436 446 sv_frame_t got = 0;
Chris@80 447
Chris@80 448 if (startFrame >= fadeIn/2) {
Chris@460 449
Chris@460 450 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 451 startFrame - fadeIn/2,
Chris@460 452 frames + fadeOut/2 + fadeIn/2);
Chris@460 453
Chris@460 454 for (int c = 0; c < modelChannels; ++c) {
Chris@460 455 copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
Chris@460 456 }
Chris@460 457
Chris@461 458 got = data[0].size();
Chris@460 459
Chris@80 460 } else {
Chris@436 461 sv_frame_t missing = fadeIn/2 - startFrame;
Chris@80 462
Chris@382 463 if (missing > 0) {
Chris@382 464 cerr << "note: channelBufSiz = " << m_channelBufSiz
Chris@382 465 << ", frames + fadeOut/2 = " << frames + fadeOut/2
Chris@382 466 << ", startFrame = " << startFrame
Chris@382 467 << ", missing = " << missing << endl;
Chris@80 468 }
Chris@80 469
Chris@460 470 auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@460 471 startFrame,
Chris@460 472 frames + fadeOut/2);
Chris@366 473 for (int c = 0; c < modelChannels; ++c) {
Chris@460 474 copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
Chris@80 475 }
Chris@80 476
Chris@461 477 got = data[0].size() + missing;
Chris@595 478 }
Chris@43 479
Chris@366 480 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@43 481
Chris@595 482 int sourceChannel = (c % modelChannels);
Chris@43 483
Chris@595 484 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 485
Chris@595 486 float channelGain = gain;
Chris@595 487 if (pan != 0.0) {
Chris@595 488 if (c == 0) {
Chris@595 489 if (pan > 0.0) channelGain *= 1.0f - pan;
Chris@595 490 } else {
Chris@595 491 if (pan < 0.0) channelGain *= pan + 1.0f;
Chris@595 492 }
Chris@595 493 }
Chris@43 494
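// The "back" pointer below points fadeIn/2 frames before the start of
// the output buffer, so the first half of the fade-in ramp is written
// into the space immediately preceding buffer[c]; the caller presumably
// guarantees writable space there whenever it requests a fade-in.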
Chris@595 495 for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
Chris@595 496 float *back = buffer[c];
Chris@595 497 back -= fadeIn/2;
Chris@595 498 back[i] +=
Chris@436 499 (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
Chris@436 500 / float(fadeIn);
Chris@595 501 }
Chris@43 502
Chris@595 503 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@595 504 float mult = channelGain;
Chris@595 505 if (i < fadeIn/2) {
Chris@595 506 mult = (mult * float(i)) / float(fadeIn);
Chris@595 507 }
Chris@595 508 if (i > frames - fadeOut/2) {
Chris@595 509 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
Chris@595 510 }
Chris@382 511 float val = m_channelBuffer[sourceChannel][i];
Chris@80 512 if (i >= got) val = 0.f;
Chris@595 513 buffer[c][i] += mult * val;
Chris@595 514 }
Chris@43 515 }
Chris@43 516
Chris@43 517 return got;
Chris@43 518 }
Chris@43 519
Chris@436 520 sv_frame_t
Chris@313 521 AudioGenerator::mixClipModel(Model *model,
Chris@436 522 sv_frame_t startFrame, sv_frame_t frames,
Chris@313 523 float **buffer, float gain, float pan)
Chris@43 524 {
Chris@308 525 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 526 if (!clipMixer) return 0;
Chris@43 527
Chris@436 528 int blocks = int(frames / m_processingBlockSize);
Chris@43 529
Chris@313 530 //!!! todo: the below -- it matters
Chris@313 531
Chris@43 532 //!!! hang on -- the fact that the audio callback play source's
Chris@43 533 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 534 //that we always get called for a multiple of it here (because it
Chris@43 535 //also depends on the JACK block size). how should we ensure that
Chris@43 536 //all models write the same amount in to the mix, and that we
Chris@43 537 //always have a multiple of the plugin buffer size? I guess this
Chris@43 538 //class has to be queryable for the plugin buffer size & the
Chris@43 539 //callback play source has to use that as a multiple for all the
Chris@43 540 //calls to mixModel
Chris@43 541
Chris@436 542 sv_frame_t got = blocks * m_processingBlockSize;
Chris@43 543
Chris@43 544 #ifdef DEBUG_AUDIO_GENERATOR
Chris@442 545 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
Chris@442 546 << ", blocks " << blocks << ", have " << m_noteOffs.size()
Chris@442 547 << " note-offs" << endl;
Chris@43 548 #endif
Chris@43 549
Chris@308 550 ClipMixer::NoteStart on;
Chris@308 551 ClipMixer::NoteEnd off;
Chris@43 552
Chris@275 553 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 554
Chris@308 555 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 556
Chris@366 557 for (int i = 0; i < blocks; ++i) {
Chris@43 558
Chris@595 559 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 560
Chris@299 561 NoteList notes;
Chris@299 562 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 563 if (exportable) {
Chris@366 564 notes = exportable->getNotesWithin(reqStart,
Chris@366 565 reqStart + m_processingBlockSize);
Chris@299 566 }
Chris@43 567
Chris@308 568 std::vector<ClipMixer::NoteStart> starts;
Chris@308 569 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 570
Chris@615 571 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 572 noteOffs.begin()->onFrame > reqStart) {
Chris@615 573
Chris@615 574 // We must have jumped back in time, as there is a
Chris@615 575 // note-off pending for a note that hasn't begun yet. Emit
Chris@615 576 // the note-off now and discard
Chris@615 577
Chris@615 578 off.frameOffset = 0;
Chris@615 579 off.frequency = noteOffs.begin()->frequency;
Chris@615 580
Chris@615 581 #ifdef DEBUG_AUDIO_GENERATOR
Chris@615 582 cerr << "mixModel [clip]: adding rewind-caused note-off at frame offset 0 frequency " << off.frequency << endl;
Chris@615 583 #endif
Chris@615 584
Chris@615 585 ends.push_back(off);
Chris@615 586 noteOffs.erase(noteOffs.begin());
Chris@615 587 }
Chris@615 588
Chris@595 589 for (NoteList::const_iterator ni = notes.begin();
Chris@275 590 ni != notes.end(); ++ni) {
Chris@43 591
Chris@595 592 sv_frame_t noteFrame = ni->start;
Chris@596 593 sv_frame_t noteDuration = ni->duration;
Chris@43 594
Chris@595 595 if (noteFrame < reqStart ||
Chris@596 596 noteFrame >= reqStart + m_processingBlockSize) {
Chris@596 597 continue;
Chris@596 598 }
Chris@596 599
Chris@596 600 if (noteDuration == 0) {
Chris@596 601 // If we have a note-off and a note-on with the same
Chris@596 602 // time, then the note-off will be assumed (in the
Chris@596 603 // logic below that deals with two-point note-on/off
Chris@596 604 // events) to be switching off an earlier note before
Chris@596 605 // this one begins -- that's necessary in order to
Chris@596 606 // support adjoining notes of equal pitch. But it does
Chris@596 607 // mean we have to explicitly ignore zero-duration
Chris@596 608 // notes, otherwise they'll be played without end
Chris@596 609 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 610 cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
Chris@596 611 #endif
Chris@596 612 continue;
Chris@596 613 }
Chris@43 614
Chris@595 615 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 616 noteOffs.begin()->offFrame <= noteFrame) {
Chris@43 617
Chris@615 618 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
Chris@308 619 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 620
Chris@308 621 off.frameOffset = eventFrame - reqStart;
Chris@308 622 off.frequency = noteOffs.begin()->frequency;
Chris@43 623
Chris@43 624 #ifdef DEBUG_AUDIO_GENERATOR
Chris@595 625 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 626 #endif
Chris@43 627
Chris@308 628 ends.push_back(off);
Chris@595 629 noteOffs.erase(noteOffs.begin());
Chris@595 630 }
Chris@43 631
Chris@308 632 on.frameOffset = noteFrame - reqStart;
Chris@308 633 on.frequency = ni->getFrequency();
Chris@436 634 on.level = float(ni->velocity) / 127.0f;
Chris@308 635 on.pan = pan;
Chris@43 636
Chris@43 637 #ifdef DEBUG_AUDIO_GENERATOR
Chris@595 638 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
Chris@43 639 #endif
Chris@595 640
Chris@308 641 starts.push_back(on);
Chris@595 642 noteOffs.insert
Chris@615 643 (NoteOff(on.frequency, noteFrame + noteDuration, noteFrame));
Chris@595 644 }
Chris@43 645
Chris@595 646 while (noteOffs.begin() != noteOffs.end() &&
Chris@615 647 noteOffs.begin()->offFrame <=
Chris@615 648 reqStart + m_processingBlockSize) {
Chris@43 649
Chris@615 650 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
Chris@308 651 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 652
Chris@308 653 off.frameOffset = eventFrame - reqStart;
Chris@308 654 off.frequency = noteOffs.begin()->frequency;
Chris@43 655
Chris@43 656 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 657 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 658 #endif
Chris@43 659
Chris@308 660 ends.push_back(off);
Chris@308 661 noteOffs.erase(noteOffs.begin());
Chris@595 662 }
Chris@43 663
Chris@595 664 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@308 665 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 666 }
Chris@43 667
Chris@308 668 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 669 }
Chris@43 670
Chris@308 671 delete[] bufferIndexes;
Chris@43 672
Chris@43 673 return got;
Chris@43 674 }
Chris@313 675
Chris@436 676 sv_frame_t
Chris@313 677 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@436 678 sv_frame_t startFrame,
Chris@436 679 sv_frame_t frames,
Chris@313 680 float **buffer,
Chris@313 681 float gain,
Chris@313 682 float pan)
Chris@313 683 {
Chris@313 684 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 685 if (!synth) return 0;
Chris@313 686
Chris@313 687 // only type we support here at the moment
Chris@313 688 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 689 if (stvm->getScaleUnits() != "Hz") return 0;
Chris@313 690
Chris@436 691 int blocks = int(frames / m_processingBlockSize);
Chris@313 692
Chris@313 693 //!!! todo: see comment in mixClipModel
Chris@313 694
Chris@436 695 sv_frame_t got = blocks * m_processingBlockSize;
Chris@313 696
Chris@313 697 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 698 cout << "mixModel [synth]: frames " << frames
Chris@595 699 << ", blocks " << blocks << endl;
Chris@313 700 #endif
Chris@313 701
Chris@313 702 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 703
Chris@366 704 for (int i = 0; i < blocks; ++i) {
Chris@313 705
Chris@595 706 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 707
Chris@595 708 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 709 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 710 }
Chris@313 711
Chris@313 712 SparseTimeValueModel::PointList points =
Chris@313 713 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 714
Chris@313 715 // by default, repeat last frequency
Chris@313 716 float f0 = 0.f;
Chris@313 717
Chris@313 718 // go straight to the last freq that is genuinely in this range
Chris@313 719 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 720 itr != points.begin(); ) {
Chris@313 721 --itr;
Chris@313 722 if (itr->frame >= reqStart &&
Chris@313 723 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 724 f0 = itr->value;
Chris@313 725 break;
Chris@313 726 }
Chris@313 727 }
Chris@313 728
Chris@314 729 // if we found no such frequency and the next point is further
Chris@314 730 // away than twice the model resolution, go silent (same
Chris@314 731 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 732 // segment)
Chris@314 733 if (f0 == 0.f) {
Chris@314 734 SparseTimeValueModel::PointList nextPoints =
Chris@314 735 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 736 if (nextPoints.empty() ||
Chris@314 737 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 738 f0 = -1.f;
Chris@314 739 }
Chris@314 740 }
Chris@314 741
Chris@315 742 // cerr << "f0 = " << f0 << endl;
Chris@313 743
Chris@313 744 synth->mix(bufferIndexes,
Chris@313 745 gain,
Chris@313 746 pan,
Chris@313 747 f0);
Chris@313 748 }
Chris@313 749
Chris@313 750 delete[] bufferIndexes;
Chris@313 751
Chris@313 752 return got;
Chris@313 753 }
Chris@313 754