annotate audio/AudioGenerator.cpp @ 596:c99892f0c5c3

Proper handling for notes that end at the same frame as a subsequent note of the same pitch begins. The note-off needs to be associated with the prior note, not a spurious zero-duration version of the subsequent note.
author Chris Cannam
date Wed, 18 Apr 2018 15:19:09 +0100
parents b23bebfdfaba
children 7da68349a0c5
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
// Fixed number of frames processed per internal mixing block.
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

// Temporary directory into which the bundled sample clips are
// unpacked; filled in lazily by initialiseSampleDir().
QString
AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
// Construct an AudioGenerator with no models attached. The source
// sample rate remains 0 until the first model is added, and output
// defaults to a single channel.
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    // Unpack the bundled sample clips to disk so clip mixers can load
    // them later (no-op if another instance has already done it).
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever the playback clip
    // assigned to a playable changes.
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}
Chris@43 64
Chris@43 65 AudioGenerator::~AudioGenerator()
Chris@43 66 {
Chris@177 67 #ifdef DEBUG_AUDIO_GENERATOR
Chris@596 68 cerr << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 69 #endif
Chris@593 70
Chris@593 71 for (int i = 0; i < m_channelBufCount; ++i) {
Chris@593 72 delete[] m_channelBuffer[i];
Chris@593 73 }
Chris@593 74 delete[] m_channelBuffer;
Chris@43 75 }
Chris@43 76
Chris@108 77 void
Chris@108 78 AudioGenerator::initialiseSampleDir()
Chris@43 79 {
Chris@108 80 if (m_sampleDir != "") return;
Chris@108 81
Chris@108 82 try {
Chris@108 83 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 84 } catch (DirectoryCreationFailed f) {
Chris@293 85 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 86 << " Failed to create temporary sample directory"
Chris@293 87 << endl;
Chris@108 88 m_sampleDir = "";
Chris@108 89 return;
Chris@108 90 }
Chris@108 91
Chris@108 92 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 93
Chris@108 94 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 95
Chris@108 96 QString fileName(sampleResourceDir[i]);
Chris@108 97 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 98 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 99
Chris@151 100 if (!file.copy(target)) {
Chris@293 101 cerr << "WARNING: AudioGenerator::getSampleDir: "
Chris@294 102 << "Unable to copy " << fileName
Chris@108 103 << " into temporary directory \""
Chris@293 104 << m_sampleDir << "\"" << endl;
Chris@151 105 } else {
Chris@151 106 QFile tf(target);
Chris@151 107 tf.setPermissions(tf.permissions() |
Chris@151 108 QFile::WriteOwner |
Chris@151 109 QFile::WriteUser);
Chris@108 110 }
Chris@43 111 }
Chris@43 112 }
Chris@43 113
Chris@43 114 bool
Chris@43 115 AudioGenerator::addModel(Model *model)
Chris@43 116 {
Chris@43 117 if (m_sourceSampleRate == 0) {
Chris@43 118
Chris@595 119 m_sourceSampleRate = model->getSampleRate();
Chris@43 120
Chris@43 121 } else {
Chris@43 122
Chris@595 123 DenseTimeValueModel *dtvm =
Chris@595 124 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 125
Chris@595 126 if (dtvm) {
Chris@595 127 m_sourceSampleRate = model->getSampleRate();
Chris@595 128 return true;
Chris@595 129 }
Chris@43 130 }
Chris@307 131
Chris@418 132 const Playable *playable = model;
Chris@418 133 if (!playable || !playable->canPlay()) return 0;
Chris@418 134
Chris@418 135 PlayParameters *parameters =
Chris@595 136 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@418 137
Chris@418 138 bool willPlay = !parameters->isPlayMuted();
Chris@418 139
Chris@313 140 if (usesClipMixer(model)) {
Chris@313 141 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 142 if (mixer) {
Chris@313 143 QMutexLocker locker(&m_mutex);
Chris@313 144 m_clipMixerMap[model] = mixer;
Chris@418 145 return willPlay;
Chris@313 146 }
Chris@313 147 }
Chris@313 148
Chris@313 149 if (usesContinuousSynth(model)) {
Chris@313 150 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 151 if (synth) {
Chris@313 152 QMutexLocker locker(&m_mutex);
Chris@313 153 m_continuousSynthMap[model] = synth;
Chris@418 154 return willPlay;
Chris@313 155 }
Chris@43 156 }
Chris@307 157
Chris@43 158 return false;
Chris@43 159 }
Chris@43 160
Chris@43 161 void
Chris@309 162 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 163 {
Chris@108 164 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 165 if (!model) {
Chris@309 166 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 167 << playable << " is not a supported model type"
Chris@293 168 << endl;
Chris@108 169 return;
Chris@108 170 }
Chris@108 171
Chris@307 172 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 173
Chris@307 174 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 175 if (mixer) {
Chris@43 176 QMutexLocker locker(&m_mutex);
Chris@307 177 m_clipMixerMap[model] = mixer;
Chris@43 178 }
Chris@43 179 }
Chris@308 180
Chris@313 181 bool
Chris@313 182 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 183 {
Chris@313 184 bool clip =
Chris@313 185 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 186 qobject_cast<const NoteModel *>(model) ||
Chris@313 187 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 188 return clip;
Chris@43 189 }
Chris@43 190
Chris@313 191 bool
Chris@349 192 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 193 {
Chris@349 194 // basically, anything that usually has sustain (like notes) or
Chris@349 195 // often has multiple sounds at once (like notes) wants to use a
Chris@349 196 // quieter level than simple click tracks
Chris@349 197 bool does =
Chris@349 198 (qobject_cast<const NoteModel *>(model) ||
Chris@349 199 qobject_cast<const FlexiNoteModel *>(model));
Chris@349 200 return does;
Chris@349 201 }
Chris@349 202
Chris@349 203 bool
Chris@313 204 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 205 {
Chris@313 206 bool cont =
Chris@313 207 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 208 return cont;
Chris@313 209 }
Chris@313 210
Chris@307 211 ClipMixer *
Chris@307 212 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 213 {
Chris@309 214 QString clipId;
Chris@43 215
Chris@108 216 const Playable *playable = model;
Chris@108 217 if (!playable || !playable->canPlay()) return 0;
Chris@108 218
Chris@43 219 PlayParameters *parameters =
Chris@595 220 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 221 if (parameters) {
Chris@309 222 clipId = parameters->getPlayClipId();
Chris@43 223 }
Chris@43 224
Chris@445 225 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 226 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@445 227 #endif
Chris@276 228
Chris@309 229 if (clipId == "") {
Chris@308 230 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 231 return 0;
Chris@276 232 }
Chris@43 233
Chris@308 234 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 235 m_sourceSampleRate,
Chris@308 236 m_processingBlockSize);
Chris@307 237
Chris@436 238 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
Chris@307 239
Chris@309 240 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 241
Chris@436 242 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
Chris@349 243 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 244 delete mixer;
Chris@43 245 return 0;
Chris@43 246 }
Chris@43 247
Chris@445 248 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 249 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 250 #endif
Chris@43 251
Chris@308 252 return mixer;
Chris@308 253 }
Chris@43 254
Chris@313 255 ContinuousSynth *
Chris@313 256 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 257 {
Chris@313 258 const Playable *playable = model;
Chris@313 259 if (!playable || !playable->canPlay()) return 0;
Chris@313 260
Chris@313 261 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 262 m_sourceSampleRate,
rmb456@323 263 m_processingBlockSize,
rmb456@323 264 m_waveType);
Chris@313 265
Chris@445 266 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 267 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 268 #endif
Chris@313 269
Chris@313 270 return synth;
Chris@313 271 }
Chris@313 272
Chris@43 273 void
Chris@43 274 AudioGenerator::removeModel(Model *model)
Chris@43 275 {
Chris@43 276 SparseOneDimensionalModel *sodm =
Chris@595 277 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 278 if (!sodm) return; // nothing to do
Chris@43 279
Chris@43 280 QMutexLocker locker(&m_mutex);
Chris@43 281
Chris@308 282 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
Chris@43 283
Chris@308 284 ClipMixer *mixer = m_clipMixerMap[sodm];
Chris@308 285 m_clipMixerMap.erase(sodm);
Chris@308 286 delete mixer;
Chris@43 287 }
Chris@43 288
Chris@43 289 void
Chris@43 290 AudioGenerator::clearModels()
Chris@43 291 {
Chris@43 292 QMutexLocker locker(&m_mutex);
Chris@308 293
Chris@308 294 while (!m_clipMixerMap.empty()) {
Chris@308 295 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@595 296 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@595 297 delete mixer;
Chris@43 298 }
Chris@43 299 }
Chris@43 300
Chris@43 301 void
Chris@43 302 AudioGenerator::reset()
Chris@43 303 {
Chris@43 304 QMutexLocker locker(&m_mutex);
Chris@308 305
Chris@445 306 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 307 cerr << "AudioGenerator::reset()" << endl;
Chris@445 308 #endif
Chris@397 309
Chris@308 310 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 311 if (i->second) {
Chris@595 312 i->second->reset();
Chris@595 313 }
Chris@43 314 }
Chris@43 315
Chris@43 316 m_noteOffs.clear();
Chris@43 317 }
Chris@43 318
Chris@43 319 void
Chris@366 320 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 321 {
Chris@43 322 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 323
Chris@233 324 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 325
Chris@43 326 QMutexLocker locker(&m_mutex);
Chris@43 327 m_targetChannelCount = targetChannelCount;
Chris@43 328
Chris@308 329 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@595 330 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 331 }
Chris@43 332 }
Chris@43 333
// Return the fixed processing block size (in frames); callers should
// mix in multiples of this.
sv_frame_t
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}
Chris@43 339
// Restrict audible playback to the given set of models; all other
// models are silenced in mixModel() until clearSoloModelSet().
void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}
Chris@43 348
// Leave solo mode: all playable, unmuted models become audible again.
void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}
Chris@43 357
Chris@436 358 sv_frame_t
Chris@436 359 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
Chris@595 360 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 361 {
Chris@43 362 if (m_sourceSampleRate == 0) {
Chris@595 363 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@595 364 return frameCount;
Chris@43 365 }
Chris@43 366
Chris@43 367 QMutexLocker locker(&m_mutex);
Chris@43 368
Chris@108 369 Playable *playable = model;
Chris@108 370 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 371
Chris@43 372 PlayParameters *parameters =
Chris@595 373 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 374 if (!parameters) return frameCount;
Chris@43 375
Chris@43 376 bool playing = !parameters->isPlayMuted();
Chris@43 377 if (!playing) {
Chris@43 378 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 379 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 380 #endif
Chris@43 381 return frameCount;
Chris@43 382 }
Chris@43 383
Chris@43 384 if (m_soloing) {
Chris@43 385 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 386 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 387 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 388 #endif
Chris@43 389 return frameCount;
Chris@43 390 }
Chris@43 391 }
Chris@43 392
Chris@43 393 float gain = parameters->getPlayGain();
Chris@43 394 float pan = parameters->getPlayPan();
Chris@43 395
Chris@43 396 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 397 if (dtvm) {
Chris@595 398 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@595 399 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 400 }
Chris@43 401
Chris@313 402 if (usesClipMixer(model)) {
Chris@313 403 return mixClipModel(model, startFrame, frameCount,
Chris@313 404 buffer, gain, pan);
Chris@313 405 }
Chris@43 406
Chris@313 407 if (usesContinuousSynth(model)) {
Chris@313 408 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 409 buffer, gain, pan);
Chris@43 410 }
Chris@43 411
Chris@276 412 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 413
Chris@43 414 return frameCount;
Chris@43 415 }
Chris@43 416
// Mix audio from a dense (waveform) model into the output buffers,
// applying gain, pan, and half-overlapped fade-in/fade-out ramps.
// Returns the number of frames actually obtained from the model.
//
// NOTE(review): assumes modelChannels >= 1 and that
// getMultiChannelData returns at least one channel (data[0] is read
// unconditionally) -- confirm against DenseTimeValueModel's contract.
sv_frame_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    // Worst-case scratch size: the requested frames plus the longer fade
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

    // (Re)allocate the per-channel scratch buffers if the current ones
    // are too small or have too few channels; they are retained across
    // calls and freed in the destructor
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {

        // Normal case: read from half a fade-in before the start, so
        // the fade-in ramp overlaps material preceding startFrame
        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame - fadeIn/2,
                                              frames + fadeOut/2 + fadeIn/2);

        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
        }

        got = data[0].size();

    } else {
        // Too close to frame 0 to back up by fadeIn/2: read from the
        // start and offset within the scratch buffer instead
        sv_frame_t missing = fadeIn/2 - startFrame;

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame,
                                              frames + fadeOut/2);
        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
        }

        got = data[0].size() + missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        // Model channels are cycled across target channels
        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        // Constant-gain stereo pan: positive pan attenuates the left
        // (first) channel, negative pan attenuates the others
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

        // First half of the fade-in is written *before* buffer[c],
        // into the preceding fadeIn/2 frames of the caller's buffer
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        // Main body plus the tail of the fade-out; ramps are applied
        // where the index falls within a fade region
        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            // Anything beyond what the model supplied is silence
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
Chris@43 517
// Mix a clip-triggered model (instants/notes) into the output
// buffers, block by block. Note-ons are gathered from the model's
// exportable notes; corresponding note-offs are queued in
// m_noteOffs[model] and emitted when their frame is reached.
//
// Ordering matters here: a note-off falling at exactly the same frame
// as a new note-on of the same pitch must be dispatched *before* that
// note-on (hence the "<= noteFrame" test below), so that adjoining
// equal-pitch notes are handled correctly -- the note-off ends the
// prior note rather than instantly killing the new one.
sv_frame_t
AudioGenerator::mixClipModel(Model *model,
                             sv_frame_t startFrame, sv_frame_t frames,
                             float **buffer, float gain, float pan)
{
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    // Only whole processing blocks are mixed; a trailing partial
    // block is not rendered (see the long comment below)
    int blocks = int(frames / m_processingBlockSize);

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    sv_frame_t got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
         << ", blocks " << blocks << ", have " << m_noteOffs.size()
         << " note-offs" << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    NoteOffSet &noteOffs = m_noteOffs[model];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;

        // Collect the notes that may start within this block
        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            sv_frame_t noteFrame = ni->start;
            sv_frame_t noteDuration = ni->duration;

            // getNotesWithin may return notes overlapping the block;
            // only those actually starting inside it trigger here
            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) {
                continue;
            }

            if (noteDuration == 0) {
                // If we have a note-off and a note-on with the same
                // time, then the note-off will be assumed (in the
                // logic below that deals with two-point note-on/off
                // events) to be switching off an earlier note before
                // this one begins -- that's necessary in order to
                // support adjoining notes of equal pitch. But it does
                // mean we have to explicitly ignore zero-duration
                // notes, otherwise they'll be played without end
#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
#endif
                continue;
            }

            // Flush any queued note-offs due at or before this
            // note-on (<=: a note-off at exactly noteFrame belongs to
            // a prior note and must precede this note-on)
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                // Clamp to block start so late-arriving offs still fire
                sv_frame_t eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            // MIDI velocity 0-127 mapped to level 0.0-1.0
            on.level = float(ni->velocity) / 127.0f;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            // Queue this note's own note-off for its end frame
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + noteDuration));
        }

        // Flush remaining note-offs falling within (or at the end of)
        // this block
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            sv_frame_t eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        // Point each channel index at this block's slice of the output
        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
Chris@313 654
Chris@436 655 sv_frame_t
Chris@313 656 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@436 657 sv_frame_t startFrame,
Chris@436 658 sv_frame_t frames,
Chris@313 659 float **buffer,
Chris@313 660 float gain,
Chris@313 661 float pan)
Chris@313 662 {
Chris@313 663 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 664 if (!synth) return 0;
Chris@313 665
Chris@313 666 // only type we support here at the moment
Chris@313 667 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 668 if (stvm->getScaleUnits() != "Hz") return 0;
Chris@313 669
Chris@436 670 int blocks = int(frames / m_processingBlockSize);
Chris@313 671
Chris@313 672 //!!! todo: see comment in mixClipModel
Chris@313 673
Chris@436 674 sv_frame_t got = blocks * m_processingBlockSize;
Chris@313 675
Chris@313 676 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 677 cout << "mixModel [synth]: frames " << frames
Chris@595 678 << ", blocks " << blocks << endl;
Chris@313 679 #endif
Chris@313 680
Chris@313 681 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 682
Chris@366 683 for (int i = 0; i < blocks; ++i) {
Chris@313 684
Chris@595 685 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 686
Chris@595 687 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 688 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 689 }
Chris@313 690
Chris@313 691 SparseTimeValueModel::PointList points =
Chris@313 692 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 693
Chris@313 694 // by default, repeat last frequency
Chris@313 695 float f0 = 0.f;
Chris@313 696
Chris@313 697 // go straight to the last freq that is genuinely in this range
Chris@313 698 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 699 itr != points.begin(); ) {
Chris@313 700 --itr;
Chris@313 701 if (itr->frame >= reqStart &&
Chris@313 702 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 703 f0 = itr->value;
Chris@313 704 break;
Chris@313 705 }
Chris@313 706 }
Chris@313 707
Chris@314 708 // if we found no such frequency and the next point is further
Chris@314 709 // away than twice the model resolution, go silent (same
Chris@314 710 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 711 // segment)
Chris@314 712 if (f0 == 0.f) {
Chris@314 713 SparseTimeValueModel::PointList nextPoints =
Chris@314 714 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 715 if (nextPoints.empty() ||
Chris@314 716 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 717 f0 = -1.f;
Chris@314 718 }
Chris@314 719 }
Chris@314 720
Chris@315 721 // cerr << "f0 = " << f0 << endl;
Chris@313 722
Chris@313 723 synth->mix(bufferIndexes,
Chris@313 724 gain,
Chris@313 725 pan,
Chris@313 726 f0);
Chris@313 727 }
Chris@313 728
Chris@313 729 delete[] bufferIndexes;
Chris@313 730
Chris@313 731 return got;
Chris@313 732 }
Chris@313 733