annotate audioio/AudioGenerator.cpp @ 342:4eccff14b4d8 tonioni

Much fiddling toward getting sessions and individual audio files to load cleanly, since they need quite different handling after loading
author Chris Cannam
date Wed, 02 Apr 2014 21:25:56 +0100
parents 5c69d40a0e30
children 0e4332efcc7d
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
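// The fixed block size, in sample frames, used internally when
// rendering playable (non-audio) models through clip mixers and
// synths; exposed to callers through getBlockSize().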
Chris@43 40 const size_t
Chris@315 41 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
rmb456@323 51 m_waveType(0),
Chris@43 52 m_soloing(false)
Chris@43 53 {
Chris@108 54 initialiseSampleDir();
Chris@43 55
Chris@43 56 connect(PlayParameterRepository::getInstance(),
Chris@309 57 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 58 this,
Chris@309 59 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 60 }
Chris@43 61
Chris@43 62 AudioGenerator::~AudioGenerator()
Chris@43 63 {
Chris@177 64 #ifdef DEBUG_AUDIO_GENERATOR
Chris@233 65 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 66 #endif
Chris@43 67 }
Chris@43 68
Chris@108 69 void
Chris@108 70 AudioGenerator::initialiseSampleDir()
Chris@43 71 {
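// Copy the built-in clip samples out of the Qt resource bundle into a
// temporary directory, so that they can later be loaded from an
// ordinary file path; the copies are made writable, presumably so the
// temporary directory can be cleaned up on exit.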
Chris@108 72 if (m_sampleDir != "") return;
Chris@108 73
Chris@108 74 try {
Chris@108 75 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 76 } catch (DirectoryCreationFailed f) {
Chris@293 77 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 78 << " Failed to create temporary sample directory"
Chris@293 79 << endl;
Chris@108 80 m_sampleDir = "";
Chris@108 81 return;
Chris@108 82 }
Chris@108 83
Chris@108 84 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 85
Chris@108 86 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 87
Chris@108 88 QString fileName(sampleResourceDir[i]);
Chris@108 89 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 90 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 91
Chris@151 92 if (!file.copy(target)) {
Chris@293 93 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 94 << "Unable to copy " << fileName
Chris@108 95 << " into temporary directory \""
Chris@293 96 << m_sampleDir << "\"" << endl;
Chris@151 97 } else {
Chris@151 98 QFile tf(target);
Chris@151 99 tf.setPermissions(tf.permissions() |
Chris@151 100 QFile::WriteOwner |
Chris@151 101 QFile::WriteUser);
Chris@108 102 }
Chris@43 103 }
Chris@43 104 }
Chris@43 105
Chris@43 106 bool
Chris@43 107 AudioGenerator::addModel(Model *model)
Chris@43 108 {
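// The first model added establishes the playback sample rate; a dense
// (audio) model added later overrides it, since audio models are
// mixed directly from their own sample data.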
Chris@43 109 if (m_sourceSampleRate == 0) {
Chris@43 110
Chris@43 111 m_sourceSampleRate = model->getSampleRate();
Chris@43 112
Chris@43 113 } else {
Chris@43 114
Chris@43 115 DenseTimeValueModel *dtvm =
Chris@43 116 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 117
Chris@43 118 if (dtvm) {
Chris@43 119 m_sourceSampleRate = model->getSampleRate();
Chris@43 120 return true;
Chris@43 121 }
Chris@43 122 }
Chris@307 123
Chris@313 124 if (usesClipMixer(model)) {
Chris@313 125 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 126 if (mixer) {
Chris@313 127 QMutexLocker locker(&m_mutex);
Chris@313 128 m_clipMixerMap[model] = mixer;
Chris@313 129 return true;
Chris@313 130 }
Chris@313 131 }
Chris@313 132
Chris@313 133 if (usesContinuousSynth(model)) {
Chris@313 134 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 135 if (synth) {
Chris@313 136 QMutexLocker locker(&m_mutex);
Chris@313 137 m_continuousSynthMap[model] = synth;
Chris@313 138 return true;
Chris@313 139 }
Chris@43 140 }
Chris@307 141
Chris@43 142 return false;
Chris@43 143 }
Chris@43 144
Chris@43 145 void
Chris@309 146 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 147 {
Chris@108 148 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 149 if (!model) {
Chris@309 150 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 151 << playable << " is not a supported model type"
Chris@293 152 << endl;
Chris@108 153 return;
Chris@108 154 }
Chris@108 155
Chris@307 156 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 157
Chris@307 158 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 159 if (mixer) {
Chris@43 160 QMutexLocker locker(&m_mutex);
Chris@307 161 m_clipMixerMap[model] = mixer;
Chris@43 162 }
Chris@43 163 }
Chris@308 164
Chris@313 165 bool
Chris@313 166 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 167 {
Chris@313 168 bool clip =
Chris@313 169 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 170 qobject_cast<const NoteModel *>(model) ||
Chris@313 171 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 172 return clip;
Chris@43 173 }
Chris@43 174
Chris@313 175 bool
Chris@313 176 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 177 {
Chris@313 178 bool cont =
Chris@313 179 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 180 return cont;
Chris@313 181 }
Chris@313 182
Chris@307 183 ClipMixer *
Chris@307 184 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 185 {
Chris@309 186 QString clipId;
Chris@43 187
Chris@108 188 const Playable *playable = model;
Chris@108 189 if (!playable || !playable->canPlay()) return 0;
Chris@108 190
Chris@43 191 PlayParameters *parameters =
Chris@108 192 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 193 if (parameters) {
Chris@309 194 clipId = parameters->getPlayClipId();
Chris@43 195 }
Chris@43 196
Chris@309 197 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): clip id = " << clipId << std::endl;
Chris@276 198
Chris@309 199 if (clipId == "") {
Chris@308 200 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 201 return 0;
Chris@276 202 }
Chris@43 203
Chris@308 204 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 205 m_sourceSampleRate,
Chris@308 206 m_processingBlockSize);
Chris@307 207
Chris@308 208 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // reference pitch for the clip: MIDI note 60 (middle C), ~261.63 Hz at A = 440
Chris@307 209
Chris@309 210 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 211
Chris@308 212 if (!mixer->loadClipData(clipPath, clipF0)) {
Chris@308 213 delete mixer;
Chris@43 214 return 0;
Chris@43 215 }
Chris@43 216
Chris@309 217 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@43 218
Chris@308 219 return mixer;
Chris@308 220 }
Chris@43 221
Chris@313 222 ContinuousSynth *
Chris@313 223 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 224 {
Chris@313 225 const Playable *playable = model;
Chris@313 226 if (!playable || !playable->canPlay()) return 0;
Chris@313 227
Chris@313 228 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 229 m_sourceSampleRate,
rmb456@323 230 m_processingBlockSize,
rmb456@323 231 m_waveType);
Chris@313 232
Chris@313 233 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@313 234
Chris@313 235 return synth;
Chris@313 236 }
Chris@313 237
Chris@43 238 void
Chris@43 239 AudioGenerator::removeModel(Model *model)
Chris@43 240 {
QMutexLocker locker(&m_mutex);

// clean up whatever mixer or synth addModel created for this model
if (m_clipMixerMap.find(model) != m_clipMixerMap.end()) {
    ClipMixer *mixer = m_clipMixerMap[model];
    m_clipMixerMap.erase(model);
    delete mixer;
}

if (m_continuousSynthMap.find(model) != m_continuousSynthMap.end()) {
    ContinuousSynth *synth = m_continuousSynthMap[model];
    m_continuousSynthMap.erase(model);
    delete synth;
}
Chris@43 252 }
Chris@43 253
Chris@43 254 void
Chris@43 255 AudioGenerator::clearModels()
Chris@43 256 {
Chris@43 257 QMutexLocker locker(&m_mutex);
Chris@308 258
Chris@308 259 while (!m_clipMixerMap.empty()) {
Chris@308 260 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@308 261 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@308 262 delete mixer;
Chris@43 263 }

// the continuous synths created in addModel are owned here as well
while (!m_continuousSynthMap.empty()) {
    ContinuousSynth *synth = m_continuousSynthMap.begin()->second;
    m_continuousSynthMap.erase(m_continuousSynthMap.begin());
    delete synth;
}
Chris@43 264 }
Chris@43 265
Chris@43 266 void
Chris@43 267 AudioGenerator::reset()
Chris@43 268 {
Chris@43 269 QMutexLocker locker(&m_mutex);
Chris@308 270
Chris@308 271 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@43 272 if (i->second) {
Chris@308 273 i->second->reset();
Chris@43 274 }
Chris@43 275 }
Chris@43 276
Chris@43 277 m_noteOffs.clear();
Chris@43 278 }
Chris@43 279
Chris@43 280 void
Chris@43 281 AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
Chris@43 282 {
Chris@43 283 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 284
Chris@233 285 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 286
Chris@43 287 QMutexLocker locker(&m_mutex);
Chris@43 288 m_targetChannelCount = targetChannelCount;
Chris@43 289
Chris@308 290 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@308 291 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 292 }
Chris@43 293 }
Chris@43 294
Chris@43 295 size_t
Chris@43 296 AudioGenerator::getBlockSize() const
Chris@43 297 {
Chris@305 298 return m_processingBlockSize;
Chris@43 299 }
Chris@43 300
Chris@43 301 void
Chris@43 302 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 303 {
Chris@43 304 QMutexLocker locker(&m_mutex);
Chris@43 305
Chris@43 306 m_soloModelSet = s;
Chris@43 307 m_soloing = true;
Chris@43 308 }
Chris@43 309
Chris@43 310 void
Chris@43 311 AudioGenerator::clearSoloModelSet()
Chris@43 312 {
Chris@43 313 QMutexLocker locker(&m_mutex);
Chris@43 314
Chris@43 315 m_soloModelSet.clear();
Chris@43 316 m_soloing = false;
Chris@43 317 }
Chris@43 318
Chris@43 319 size_t
Chris@43 320 AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
Chris@43 321 float **buffer, size_t fadeIn, size_t fadeOut)
Chris@43 322 {
Chris@43 323 if (m_sourceSampleRate == 0) {
Chris@293 324 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@43 325 return frameCount;
Chris@43 326 }
Chris@43 327
Chris@43 328 QMutexLocker locker(&m_mutex);
Chris@43 329
Chris@108 330 Playable *playable = model;
Chris@108 331 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 332
Chris@43 333 PlayParameters *parameters =
Chris@108 334 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 335 if (!parameters) return frameCount;
Chris@43 336
Chris@43 337 bool playing = !parameters->isPlayMuted();
Chris@43 338 if (!playing) {
Chris@43 339 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 340 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 341 #endif
Chris@43 342 return frameCount;
Chris@43 343 }
Chris@43 344
Chris@43 345 if (m_soloing) {
Chris@43 346 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 347 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 348 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 349 #endif
Chris@43 350 return frameCount;
Chris@43 351 }
Chris@43 352 }
Chris@43 353
Chris@43 354 float gain = parameters->getPlayGain();
Chris@43 355 float pan = parameters->getPlayPan();
Chris@43 356
Chris@43 357 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 358 if (dtvm) {
Chris@43 359 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@43 360 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 361 }
Chris@43 362
Chris@313 363 if (usesClipMixer(model)) {
Chris@313 364 return mixClipModel(model, startFrame, frameCount,
Chris@313 365 buffer, gain, pan);
Chris@313 366 }
Chris@43 367
Chris@313 368 if (usesContinuousSynth(model)) {
Chris@313 369 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 370 buffer, gain, pan);
Chris@43 371 }
Chris@43 372
Chris@276 373 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 374
Chris@43 375 return frameCount;
Chris@43 376 }
Chris@43 377
Chris@43 378 size_t
Chris@43 379 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@43 380 size_t startFrame, size_t frames,
Chris@43 381 float **buffer, float gain, float pan,
Chris@43 382 size_t fadeIn, size_t fadeOut)
Chris@43 383 {
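// Scratch buffers used when reading from the model. These are
// function-static: they are reallocated only when they need to grow,
// and are shared across calls (and across AudioGenerator instances),
// so this method is not re-entrant.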
Chris@80 384 static float **channelBuffer = 0;
Chris@80 385 static size_t channelBufSiz = 0;
Chris@80 386 static size_t channelBufCount = 0;
Chris@43 387
Chris@43 388 size_t totalFrames = frames + fadeIn/2 + fadeOut/2;
Chris@43 389
Chris@80 390 size_t modelChannels = dtvm->getChannelCount();
Chris@80 391
Chris@80 392 if (channelBufSiz < totalFrames || channelBufCount < modelChannels) {
Chris@80 393
Chris@80 394 for (size_t c = 0; c < channelBufCount; ++c) {
Chris@80 395 delete[] channelBuffer[c];
Chris@80 396 }
Chris@80 397
Chris@43 398 delete[] channelBuffer;
Chris@80 399 channelBuffer = new float *[modelChannels];
Chris@80 400
Chris@80 401 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 402 channelBuffer[c] = new float[totalFrames];
Chris@80 403 }
Chris@80 404
Chris@80 405 channelBufCount = modelChannels;
Chris@43 406 channelBufSiz = totalFrames;
Chris@43 407 }
Chris@80 408
Chris@43 409 size_t got = 0;
Chris@80 410
Chris@80 411 if (startFrame >= fadeIn/2) {
Chris@80 412 got = dtvm->getData(0, modelChannels - 1,
Chris@80 413 startFrame - fadeIn/2,
Chris@80 414 frames + fadeOut/2 + fadeIn/2,
Chris@80 415 channelBuffer);
Chris@80 416 } else {
Chris@80 417 size_t missing = fadeIn/2 - startFrame;
Chris@80 418
Chris@80 419 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 420 channelBuffer[c] += missing;
Chris@80 421 }
Chris@80 422
Chris@80 423 got = dtvm->getData(0, modelChannels - 1,
Chris@80 424 startFrame,
Chris@80 425 frames + fadeOut/2,
Chris@80 426 channelBuffer);
Chris@80 427
Chris@80 428 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 429 channelBuffer[c] -= missing;
Chris@80 430 }
Chris@80 431
Chris@80 432 got += missing;
Chris@80 433 }
Chris@43 434
Chris@43 435 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@43 436
Chris@80 437 size_t sourceChannel = (c % modelChannels);
Chris@43 438
Chris@233 439 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 440
Chris@43 441 float channelGain = gain;
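// Simple linear pan law: a positive pan attenuates channel 0, a
// negative pan attenuates the remaining channels, and zero leaves the
// gain unchanged.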
Chris@43 442 if (pan != 0.0) {
Chris@43 443 if (c == 0) {
Chris@43 444 if (pan > 0.0) channelGain *= 1.0 - pan;
Chris@43 445 } else {
Chris@43 446 if (pan < 0.0) channelGain *= pan + 1.0;
Chris@43 447 }
Chris@43 448 }
Chris@43 449
Chris@43 450 for (size_t i = 0; i < fadeIn/2; ++i) {
Chris@43 451 float *back = buffer[c];
Chris@43 452 back -= fadeIn/2;
Chris@80 453 back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn;
Chris@43 454 }
Chris@43 455
Chris@43 456 for (size_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@43 457 float mult = channelGain;
Chris@43 458 if (i < fadeIn/2) {
Chris@43 459 mult = (mult * i) / fadeIn;
Chris@43 460 }
Chris@43 461 if (i > frames - fadeOut/2) {
Chris@43 462 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
Chris@43 463 }
Chris@80 464 float val = channelBuffer[sourceChannel][i];
Chris@80 465 if (i >= got) val = 0.f;
Chris@80 466 buffer[c][i] += mult * val;
Chris@43 467 }
Chris@43 468 }
Chris@43 469
Chris@43 470 return got;
Chris@43 471 }
Chris@43 472
Chris@43 473 size_t
Chris@313 474 AudioGenerator::mixClipModel(Model *model,
Chris@313 475 size_t startFrame, size_t frames,
Chris@313 476 float **buffer, float gain, float pan)
Chris@43 477 {
Chris@308 478 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 479 if (!clipMixer) return 0;
Chris@43 480
Chris@305 481 size_t blocks = frames / m_processingBlockSize;
Chris@43 482
Chris@313 483 //!!! todo: the below -- it matters
Chris@313 484
Chris@43 485 //!!! hang on -- the fact that the audio callback play source's
Chris@43 486 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 487 //that we always get called for a multiple of it here (because it
Chris@43 488 //also depends on the JACK block size). how should we ensure that
Chris@43 489 //all models write the same amount in to the mix, and that we
Chris@43 490 //always have a multiple of the plugin buffer size? I guess this
Chris@43 491 //class has to be queryable for the plugin buffer size & the
Chris@43 492 //callback play source has to use that as a multiple for all the
Chris@43 493 //calls to mixModel
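// Note that, as it stands, any remainder of "frames" that does not
// fill a whole m_processingBlockSize block is not rendered at all,
// and the value returned reflects whole blocks only. (Illustrative
// sketch only, not part of this class: a caller could avoid dropped
// frames by rounding its requests down to a whole number of blocks,
// e.g.
//
//     size_t blockSize = generator->getBlockSize();
//     size_t usable = (frameCount / blockSize) * blockSize;
//     generator->mixModel(model, startFrame, usable, buffers, 0, 0);
//
// and carrying the remainder over into its next request.)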
Chris@43 494
Chris@305 495 size_t got = blocks * m_processingBlockSize;
Chris@43 496
Chris@43 497 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 498 cout << "mixModel [clip]: frames " << frames
Chris@293 499 << ", blocks " << blocks << endl;
Chris@43 500 #endif
Chris@43 501
Chris@308 502 ClipMixer::NoteStart on;
Chris@308 503 ClipMixer::NoteEnd off;
Chris@43 504
Chris@275 505 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 506
Chris@308 507 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 508
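// For each processing block: gather the notes starting within the
// block, emit any pending note-offs that fall due at or before each
// new note-on, queue a note-off for each new note at start + duration,
// flush the remaining note-offs due before the end of the block, and
// hand the resulting batch of events to the clip mixer.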
Chris@43 509 for (size_t i = 0; i < blocks; ++i) {
Chris@43 510
Chris@305 511 size_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 512
Chris@299 513 NoteList notes;
Chris@299 514 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 515 if (exportable) {
Chris@308 516 notes = exportable->getNotes(reqStart,
Chris@308 517 reqStart + m_processingBlockSize);
Chris@299 518 }
Chris@43 519
Chris@308 520 std::vector<ClipMixer::NoteStart> starts;
Chris@308 521 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 522
Chris@275 523 for (NoteList::const_iterator ni = notes.begin();
Chris@275 524 ni != notes.end(); ++ni) {
Chris@43 525
Chris@275 526 size_t noteFrame = ni->start;
Chris@43 527
Chris@275 528 if (noteFrame < reqStart ||
Chris@305 529 noteFrame >= reqStart + m_processingBlockSize) continue;
Chris@43 530
Chris@43 531 while (noteOffs.begin() != noteOffs.end() &&
Chris@275 532 noteOffs.begin()->frame <= noteFrame) {
Chris@43 533
Chris@308 534 size_t eventFrame = noteOffs.begin()->frame;
Chris@308 535 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 536
Chris@308 537 off.frameOffset = eventFrame - reqStart;
Chris@308 538 off.frequency = noteOffs.begin()->frequency;
Chris@43 539
Chris@43 540 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 541 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 542 #endif
Chris@43 543
Chris@308 544 ends.push_back(off);
Chris@43 545 noteOffs.erase(noteOffs.begin());
Chris@43 546 }
Chris@43 547
Chris@308 548 on.frameOffset = noteFrame - reqStart;
Chris@308 549 on.frequency = ni->getFrequency();
Chris@308 550 on.level = float(ni->velocity) / 127.0;
Chris@308 551 on.pan = pan;
Chris@43 552
Chris@43 553 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 554 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << endl;
Chris@43 555 #endif
Chris@43 556
Chris@308 557 starts.push_back(on);
Chris@275 558 noteOffs.insert
Chris@308 559 (NoteOff(on.frequency, noteFrame + ni->duration));
Chris@43 560 }
Chris@43 561
Chris@43 562 while (noteOffs.begin() != noteOffs.end() &&
Chris@308 563 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
Chris@43 564
Chris@308 565 size_t eventFrame = noteOffs.begin()->frame;
Chris@308 566 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 567
Chris@308 568 off.frameOffset = eventFrame - reqStart;
Chris@308 569 off.frequency = noteOffs.begin()->frequency;
Chris@43 570
Chris@43 571 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 572 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 573 #endif
Chris@43 574
Chris@308 575 ends.push_back(off);
Chris@308 576 noteOffs.erase(noteOffs.begin());
Chris@43 577 }
Chris@43 578
Chris@43 579 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@308 580 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 581 }
Chris@43 582
Chris@308 583 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 584 }
Chris@43 585
Chris@308 586 delete[] bufferIndexes;
Chris@43 587
Chris@43 588 return got;
Chris@43 589 }
Chris@313 590
Chris@313 591 size_t
Chris@313 592 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@313 593 size_t startFrame,
Chris@313 594 size_t frames,
Chris@313 595 float **buffer,
Chris@313 596 float gain,
Chris@313 597 float pan)
Chris@313 598 {
Chris@313 599 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 600 if (!synth) return 0;
Chris@313 601
Chris@313 602 // only type we support here at the moment
Chris@313 603 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 604 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 605
Chris@313 606 size_t blocks = frames / m_processingBlockSize;
Chris@313 607
Chris@313 608 //!!! todo: see comment in mixClipModel
Chris@313 609
Chris@313 610 size_t got = blocks * m_processingBlockSize;
Chris@313 611
Chris@313 612 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 613 cout << "mixModel [synth]: frames " << frames
Chris@313 614 << ", blocks " << blocks << endl;
Chris@313 615 #endif
Chris@313 616
Chris@313 617 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 618
Chris@313 619 for (size_t i = 0; i < blocks; ++i) {
Chris@313 620
Chris@313 621 size_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 622
Chris@313 623 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@313 624 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 625 }
Chris@313 626
Chris@313 627 SparseTimeValueModel::PointList points =
Chris@313 628 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 629
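// Decide what f0 to hand to the synth for this block: the last
// frequency value that genuinely falls within the block if there is
// one, otherwise 0 (keep the previous frequency) or -1 (fall silent),
// as worked out below.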
Chris@313 630 // by default, repeat last frequency
Chris@313 631 float f0 = 0.f;
Chris@313 632
Chris@313 633 // go straight to the last freq that is genuinely in this range
Chris@313 634 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 635 itr != points.begin(); ) {
Chris@313 636 --itr;
Chris@313 637 if (itr->frame >= reqStart &&
Chris@313 638 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 639 f0 = itr->value;
Chris@313 640 break;
Chris@313 641 }
Chris@313 642 }
Chris@313 643
Chris@314 644 // if we found no such frequency and the next point is further
Chris@314 645 // away than twice the model resolution, go silent (same
Chris@314 646 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 647 // segment)
Chris@314 648 if (f0 == 0.f) {
Chris@314 649 SparseTimeValueModel::PointList nextPoints =
Chris@314 650 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 651 if (nextPoints.empty() ||
Chris@314 652 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 653 f0 = -1.f;
Chris@314 654 }
Chris@314 655 }
Chris@314 656
Chris@315 657 // cerr << "f0 = " << f0 << endl;
Chris@313 658
Chris@313 659 synth->mix(bufferIndexes,
Chris@313 660 gain,
Chris@313 661 pan,
Chris@313 662 f0);
Chris@313 663 }
Chris@313 664
Chris@313 665 delete[] bufferIndexes;
Chris@313 666
Chris@313 667 return got;
Chris@313 668 }
Chris@313 669