annotate audioio/AudioGenerator.cpp @ 313:58582119c92a tonioni

Add a basic continuous synth implementation (simple sinusoids only, no gaps)
author Chris Cannam
date Wed, 08 Jan 2014 13:07:22 +0000
parents 71050ffd0141
children 817ad10f91d1
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
Chris@43 40 const size_t
Chris@305 41 AudioGenerator::m_processingBlockSize = 2048;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
Chris@43 51 m_soloing(false)
Chris@43 52 {
Chris@108 53 initialiseSampleDir();
Chris@43 54
Chris@43 55 connect(PlayParameterRepository::getInstance(),
Chris@309 56 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 57 this,
Chris@309 58 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 59 }
Chris@43 60
Chris@43 61 AudioGenerator::~AudioGenerator()
Chris@43 62 {
Chris@177 63 #ifdef DEBUG_AUDIO_GENERATOR
Chris@233 64 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 65 #endif
Chris@43 66 }
Chris@43 67
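// Copy the sample clips bundled as Qt resources (:/samples/*.wav) into a
// writable temporary directory, so that they can later be loaded by path
// (see makeClipMixerFor). Called once, from the constructor.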
Chris@108 68 void
Chris@108 69 AudioGenerator::initialiseSampleDir()
Chris@43 70 {
Chris@108 71 if (m_sampleDir != "") return;
Chris@108 72
Chris@108 73 try {
Chris@108 74 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 75 } catch (DirectoryCreationFailed f) {
Chris@293 76 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 77 << " Failed to create temporary sample directory"
Chris@293 78 << endl;
Chris@108 79 m_sampleDir = "";
Chris@108 80 return;
Chris@108 81 }
Chris@108 82
Chris@108 83 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 84
Chris@108 85 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 86
Chris@108 87 QString fileName(sampleResourceDir[i]);
Chris@108 88 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 89 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 90
Chris@151 91 if (!file.copy(target)) {
Chris@293 92 cerr << "WARNING: AudioGenerator::getSampleDir: "
Chris@294 93 << "Unable to copy " << fileName
Chris@108 94 << " into temporary directory \""
Chris@293 95 << m_sampleDir << "\"" << endl;
Chris@151 96 } else {
Chris@151 97 QFile tf(target);
Chris@151 98 tf.setPermissions(tf.permissions() |
Chris@151 99 QFile::WriteOwner |
Chris@151 100 QFile::WriteUser);
Chris@108 101 }
Chris@43 102 }
Chris@43 103 }
Chris@43 104
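// Register a model for playback. The first model added establishes the
// source sample rate (a dense time-value model added later overrides it);
// playable sparse models also get a ClipMixer or ContinuousSynth created
// for them here, keyed by model.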
Chris@43 105 bool
Chris@43 106 AudioGenerator::addModel(Model *model)
Chris@43 107 {
Chris@43 108 if (m_sourceSampleRate == 0) {
Chris@43 109
Chris@43 110 m_sourceSampleRate = model->getSampleRate();
Chris@43 111
Chris@43 112 } else {
Chris@43 113
Chris@43 114 DenseTimeValueModel *dtvm =
Chris@43 115 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 116
Chris@43 117 if (dtvm) {
Chris@43 118 m_sourceSampleRate = model->getSampleRate();
Chris@43 119 return true;
Chris@43 120 }
Chris@43 121 }
Chris@307 122
Chris@313 123 if (usesClipMixer(model)) {
Chris@313 124 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 125 if (mixer) {
Chris@313 126 QMutexLocker locker(&m_mutex);
Chris@313 127 m_clipMixerMap[model] = mixer;
Chris@313 128 return true;
Chris@313 129 }
Chris@313 130 }
Chris@313 131
Chris@313 132 if (usesContinuousSynth(model)) {
Chris@313 133 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 134 if (synth) {
Chris@313 135 QMutexLocker locker(&m_mutex);
Chris@313 136 m_continuousSynthMap[model] = synth;
Chris@313 137 return true;
Chris@313 138 }
Chris@43 139 }
Chris@307 140
Chris@43 141 return false;
Chris@43 142 }
Chris@43 143
Chris@43 144 void
Chris@309 145 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 146 {
Chris@108 147 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 148 if (!model) {
Chris@309 149 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 150 << playable << " is not a supported model type"
Chris@293 151 << endl;
Chris@108 152 return;
Chris@108 153 }
Chris@108 154
Chris@307 155 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 156
Chris@307 157 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 158 if (mixer) {
Chris@43 159 QMutexLocker locker(&m_mutex);
Chris@307 160 m_clipMixerMap[model] = mixer;
Chris@43 161 }
Chris@43 162 }
Chris@308 163
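// Event-like models (time instants, notes) are rendered by triggering a
// short sample clip for each event, using a ClipMixer.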
Chris@313 164 bool
Chris@313 165 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 166 {
Chris@313 167 bool clip =
Chris@313 168 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 169 qobject_cast<const NoteModel *>(model) ||
Chris@313 170 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 171 return clip;
Chris@43 172 }
Chris@43 173
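// Sparse time-value models are rendered by a ContinuousSynth, which plays
// a sinusoid tracking the model's values as frequencies (the values must
// be in Hz; see mixContinuousSynthModel).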
Chris@313 174 bool
Chris@313 175 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 176 {
Chris@313 177 bool cont =
Chris@313 178 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 179 return cont;
Chris@313 180 }
Chris@313 181
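// Create a ClipMixer for the given model, if it is playable and has a
// play clip id set in its play parameters. The clip's WAV file is loaded
// from the sample directory, with the frequency of MIDI note 60 passed as
// the clip's reference pitch. Returns 0 on failure.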
Chris@307 182 ClipMixer *
Chris@307 183 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 184 {
Chris@309 185 QString clipId;
Chris@43 186
Chris@108 187 const Playable *playable = model;
Chris@108 188 if (!playable || !playable->canPlay()) return 0;
Chris@108 189
Chris@43 190 PlayParameters *parameters =
Chris@108 191 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 192 if (parameters) {
Chris@309 193 clipId = parameters->getPlayClipId();
Chris@43 194 }
Chris@43 195
Chris@309 196 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@276 197
Chris@309 198 if (clipId == "") {
Chris@308 199 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 200 return 0;
Chris@276 201 }
Chris@43 202
Chris@308 203 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 204 m_sourceSampleRate,
Chris@308 205 m_processingBlockSize);
Chris@307 206
Chris@308 207 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // reference frequency for the clip, required by loadClipData: that of MIDI note 60 (middle C) at A = 440 Hz
Chris@307 208
Chris@309 209 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 210
Chris@308 211 if (!mixer->loadClipData(clipPath, clipF0)) {
Chris@308 212 delete mixer;
Chris@43 213 return 0;
Chris@43 214 }
Chris@43 215
Chris@309 216 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@43 217
Chris@308 218 return mixer;
Chris@308 219 }
Chris@43 220
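// Create a ContinuousSynth for the given model, if it is playable, using
// the current channel count, sample rate and processing block size.
// Returns 0 if the model cannot be played.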
Chris@313 221 ContinuousSynth *
Chris@313 222 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 223 {
Chris@313 224 const Playable *playable = model;
Chris@313 225 if (!playable || !playable->canPlay()) return 0;
Chris@313 226
Chris@313 227 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 228 m_sourceSampleRate,
Chris@313 229 m_processingBlockSize);
Chris@313 230
Chris@313 231 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@313 232
Chris@313 233 return synth;
Chris@313 234 }
Chris@313 235
Chris@43 236 void
Chris@43 237 AudioGenerator::removeModel(Model *model)
Chris@43 238 {
QMutexLocker locker(&m_mutex);

// The model may have a clip mixer or a continuous synth (or neither)
// associated with it; release whichever is present.

if (m_clipMixerMap.find(model) != m_clipMixerMap.end()) {
ClipMixer *mixer = m_clipMixerMap[model];
m_clipMixerMap.erase(model);
delete mixer;
}

if (m_continuousSynthMap.find(model) != m_continuousSynthMap.end()) {
ContinuousSynth *synth = m_continuousSynthMap[model];
m_continuousSynthMap.erase(model);
delete synth;
}
Chris@43 250 }
Chris@43 251
Chris@43 252 void
Chris@43 253 AudioGenerator::clearModels()
Chris@43 254 {
Chris@43 255 QMutexLocker locker(&m_mutex);
Chris@308 256
Chris@308 257 while (!m_clipMixerMap.empty()) {
Chris@308 258 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@308 259 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@308 260 delete mixer;
Chris@43 261 }

while (!m_continuousSynthMap.empty()) {
ContinuousSynth *synth = m_continuousSynthMap.begin()->second;
m_continuousSynthMap.erase(m_continuousSynthMap.begin());
delete synth;
}
Chris@43 262 }
Chris@43 263
Chris@43 264 void
Chris@43 265 AudioGenerator::reset()
Chris@43 266 {
Chris@43 267 QMutexLocker locker(&m_mutex);
Chris@308 268
Chris@308 269 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@43 270 if (i->second) {
Chris@308 271 i->second->reset();
Chris@43 272 }
Chris@43 273 }
Chris@43 274
Chris@43 275 m_noteOffs.clear();
Chris@43 276 }
Chris@43 277
Chris@43 278 void
Chris@43 279 AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
Chris@43 280 {
Chris@43 281 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 282
Chris@233 283 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 284
Chris@43 285 QMutexLocker locker(&m_mutex);
Chris@43 286 m_targetChannelCount = targetChannelCount;
Chris@43 287
Chris@308 288 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@308 289 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 290 }
Chris@43 291 }
Chris@43 292
Chris@43 293 size_t
Chris@43 294 AudioGenerator::getBlockSize() const
Chris@43 295 {
Chris@305 296 return m_processingBlockSize;
Chris@43 297 }
Chris@43 298
Chris@43 299 void
Chris@43 300 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 301 {
Chris@43 302 QMutexLocker locker(&m_mutex);
Chris@43 303
Chris@43 304 m_soloModelSet = s;
Chris@43 305 m_soloing = true;
Chris@43 306 }
Chris@43 307
Chris@43 308 void
Chris@43 309 AudioGenerator::clearSoloModelSet()
Chris@43 310 {
Chris@43 311 QMutexLocker locker(&m_mutex);
Chris@43 312
Chris@43 313 m_soloModelSet.clear();
Chris@43 314 m_soloing = false;
Chris@43 315 }
Chris@43 316
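// Mix one model's contribution to the given frame range into the supplied
// buffers. Muted models, and models outside the solo set while soloing is
// active, are skipped; otherwise the work is delegated to the dense, clip
// or continuous-synth mixing function as appropriate.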
Chris@43 317 size_t
Chris@43 318 AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
Chris@43 319 float **buffer, size_t fadeIn, size_t fadeOut)
Chris@43 320 {
Chris@43 321 if (m_sourceSampleRate == 0) {
Chris@293 322 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@43 323 return frameCount;
Chris@43 324 }
Chris@43 325
Chris@43 326 QMutexLocker locker(&m_mutex);
Chris@43 327
Chris@108 328 Playable *playable = model;
Chris@108 329 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 330
Chris@43 331 PlayParameters *parameters =
Chris@108 332 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 333 if (!parameters) return frameCount;
Chris@43 334
Chris@43 335 bool playing = !parameters->isPlayMuted();
Chris@43 336 if (!playing) {
Chris@43 337 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 338 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 339 #endif
Chris@43 340 return frameCount;
Chris@43 341 }
Chris@43 342
Chris@43 343 if (m_soloing) {
Chris@43 344 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 345 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 346 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 347 #endif
Chris@43 348 return frameCount;
Chris@43 349 }
Chris@43 350 }
Chris@43 351
Chris@43 352 float gain = parameters->getPlayGain();
Chris@43 353 float pan = parameters->getPlayPan();
Chris@43 354
Chris@43 355 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 356 if (dtvm) {
Chris@43 357 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@43 358 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 359 }
Chris@43 360
Chris@313 361 if (usesClipMixer(model)) {
Chris@313 362 return mixClipModel(model, startFrame, frameCount,
Chris@313 363 buffer, gain, pan);
Chris@313 364 }
Chris@43 365
Chris@313 366 if (usesContinuousSynth(model)) {
Chris@313 367 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 368 buffer, gain, pan);
Chris@43 369 }
Chris@43 370
Chris@276 371 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 372
Chris@43 373 return frameCount;
Chris@43 374 }
Chris@43 375
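// Mix audio read directly from a dense time-value model (e.g. a waveform)
// into the output buffers: map model channels onto target channels, apply
// gain and pan, and apply short fade-in/fade-out ramps at the edges of the
// requested region. A static scratch buffer is grown and reused between
// calls. Returns the number of frames actually obtained from the model.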
Chris@43 376 size_t
Chris@43 377 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@43 378 size_t startFrame, size_t frames,
Chris@43 379 float **buffer, float gain, float pan,
Chris@43 380 size_t fadeIn, size_t fadeOut)
Chris@43 381 {
Chris@80 382 static float **channelBuffer = 0;
Chris@80 383 static size_t channelBufSiz = 0;
Chris@80 384 static size_t channelBufCount = 0;
Chris@43 385
Chris@43 386 size_t totalFrames = frames + fadeIn/2 + fadeOut/2;
Chris@43 387
Chris@80 388 size_t modelChannels = dtvm->getChannelCount();
Chris@80 389
Chris@80 390 if (channelBufSiz < totalFrames || channelBufCount < modelChannels) {
Chris@80 391
Chris@80 392 for (size_t c = 0; c < channelBufCount; ++c) {
Chris@80 393 delete[] channelBuffer[c];
Chris@80 394 }
Chris@80 395
Chris@43 396 delete[] channelBuffer;
Chris@80 397 channelBuffer = new float *[modelChannels];
Chris@80 398
Chris@80 399 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 400 channelBuffer[c] = new float[totalFrames];
Chris@80 401 }
Chris@80 402
Chris@80 403 channelBufCount = modelChannels;
Chris@43 404 channelBufSiz = totalFrames;
Chris@43 405 }
Chris@80 406
Chris@43 407 size_t got = 0;
Chris@80 408
Chris@80 409 if (startFrame >= fadeIn/2) {
Chris@80 410 got = dtvm->getData(0, modelChannels - 1,
Chris@80 411 startFrame - fadeIn/2,
Chris@80 412 frames + fadeOut/2 + fadeIn/2,
Chris@80 413 channelBuffer);
Chris@80 414 } else {
Chris@80 415 size_t missing = fadeIn/2 - startFrame;
Chris@80 416
Chris@80 417 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 418 channelBuffer[c] += missing;
Chris@80 419 }
Chris@80 420
Chris@80 421 got = dtvm->getData(0, modelChannels - 1,
Chris@80 422 startFrame,
Chris@80 423 frames + fadeOut/2,
Chris@80 424 channelBuffer);
Chris@80 425
Chris@80 426 for (size_t c = 0; c < modelChannels; ++c) {
Chris@80 427 channelBuffer[c] -= missing;
Chris@80 428 }
Chris@80 429
Chris@80 430 got += missing;
Chris@80 431 }
Chris@43 432
Chris@43 433 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@43 434
Chris@80 435 size_t sourceChannel = (c % modelChannels);
Chris@43 436
Chris@233 437 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 438
Chris@43 439 float channelGain = gain;
Chris@43 440 if (pan != 0.0) {
Chris@43 441 if (c == 0) {
Chris@43 442 if (pan > 0.0) channelGain *= 1.0 - pan;
Chris@43 443 } else {
Chris@43 444 if (pan < 0.0) channelGain *= pan + 1.0;
Chris@43 445 }
Chris@43 446 }
Chris@43 447
Chris@43 448 for (size_t i = 0; i < fadeIn/2; ++i) {
Chris@43 449 float *back = buffer[c];
Chris@43 450 back -= fadeIn/2;
Chris@80 451 back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn;
Chris@43 452 }
Chris@43 453
Chris@43 454 for (size_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@43 455 float mult = channelGain;
Chris@43 456 if (i < fadeIn/2) {
Chris@43 457 mult = (mult * i) / fadeIn;
Chris@43 458 }
Chris@43 459 if (i > frames - fadeOut/2) {
Chris@43 460 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
Chris@43 461 }
Chris@80 462 float val = channelBuffer[sourceChannel][i];
Chris@80 463 if (i >= got) val = 0.f;
Chris@80 464 buffer[c][i] += mult * val;
Chris@43 465 }
Chris@43 466 }
Chris@43 467
Chris@43 468 return got;
Chris@43 469 }
Chris@43 470
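// Mix an event-based model through its ClipMixer: for each whole
// processing block, fetch the model's notes (via NoteExportable), turn
// them into note-on and note-off events (pending note-offs are kept in
// m_noteOffs), and have the ClipMixer render them into the buffers.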
Chris@43 471 size_t
Chris@313 472 AudioGenerator::mixClipModel(Model *model,
Chris@313 473 size_t startFrame, size_t frames,
Chris@313 474 float **buffer, float gain, float pan)
Chris@43 475 {
Chris@308 476 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 477 if (!clipMixer) return 0;
Chris@43 478
Chris@305 479 size_t blocks = frames / m_processingBlockSize;
Chris@43 480
Chris@313 481 //!!! todo: the below -- it matters
Chris@313 482
Chris@43 483 //!!! hang on -- the fact that the audio callback play source's
Chris@43 484 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 485 //that we always get called for a multiple of it here (because it
Chris@43 486 //also depends on the JACK block size). How should we ensure that
Chris@43 487 //all models write the same amount into the mix, and that we
Chris@43 488 //always have a multiple of the plugin buffer size? I guess this
Chris@43 489 //class has to be queryable for the plugin buffer size & the
Chris@43 490 //callback play source has to use that as a multiple for all the
Chris@43 491 //calls to mixModel
Chris@43 492
Chris@305 493 size_t got = blocks * m_processingBlockSize;
Chris@43 494
Chris@43 495 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 496 cout << "mixModel [clip]: frames " << frames
Chris@293 497 << ", blocks " << blocks << endl;
Chris@43 498 #endif
Chris@43 499
Chris@308 500 ClipMixer::NoteStart on;
Chris@308 501 ClipMixer::NoteEnd off;
Chris@43 502
Chris@275 503 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 504
Chris@308 505 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 506
Chris@43 507 for (size_t i = 0; i < blocks; ++i) {
Chris@43 508
Chris@305 509 size_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 510
Chris@299 511 NoteList notes;
Chris@299 512 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 513 if (exportable) {
Chris@308 514 notes = exportable->getNotes(reqStart,
Chris@308 515 reqStart + m_processingBlockSize);
Chris@299 516 }
Chris@43 517
Chris@308 518 std::vector<ClipMixer::NoteStart> starts;
Chris@308 519 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 520
Chris@275 521 for (NoteList::const_iterator ni = notes.begin();
Chris@275 522 ni != notes.end(); ++ni) {
Chris@43 523
Chris@275 524 size_t noteFrame = ni->start;
Chris@43 525
Chris@275 526 if (noteFrame < reqStart ||
Chris@305 527 noteFrame >= reqStart + m_processingBlockSize) continue;
Chris@43 528
Chris@43 529 while (noteOffs.begin() != noteOffs.end() &&
Chris@275 530 noteOffs.begin()->frame <= noteFrame) {
Chris@43 531
Chris@308 532 size_t eventFrame = noteOffs.begin()->frame;
Chris@308 533 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 534
Chris@308 535 off.frameOffset = eventFrame - reqStart;
Chris@308 536 off.frequency = noteOffs.begin()->frequency;
Chris@43 537
Chris@43 538 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 539 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 540 #endif
Chris@43 541
Chris@308 542 ends.push_back(off);
Chris@43 543 noteOffs.erase(noteOffs.begin());
Chris@43 544 }
Chris@43 545
Chris@308 546 on.frameOffset = noteFrame - reqStart;
Chris@308 547 on.frequency = ni->getFrequency();
Chris@308 548 on.level = float(ni->velocity) / 127.0;
Chris@308 549 on.pan = pan;
Chris@43 550
Chris@43 551 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 552 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << endl;
Chris@43 553 #endif
Chris@43 554
Chris@308 555 starts.push_back(on);
Chris@275 556 noteOffs.insert
Chris@308 557 (NoteOff(on.frequency, noteFrame + ni->duration));
Chris@43 558 }
Chris@43 559
Chris@43 560 while (noteOffs.begin() != noteOffs.end() &&
Chris@308 561 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
Chris@43 562
Chris@308 563 size_t eventFrame = noteOffs.begin()->frame;
Chris@308 564 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 565
Chris@308 566 off.frameOffset = eventFrame - reqStart;
Chris@308 567 off.frequency = noteOffs.begin()->frequency;
Chris@43 568
Chris@43 569 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 570 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 571 #endif
Chris@43 572
Chris@308 573 ends.push_back(off);
Chris@308 574 noteOffs.erase(noteOffs.begin());
Chris@43 575 }
Chris@43 576
Chris@43 577 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@308 578 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 579 }
Chris@43 580
Chris@308 581 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 582 }
Chris@43 583
Chris@308 584 delete[] bufferIndexes;
Chris@43 585
Chris@43 586 return got;
Chris@43 587 }
Chris@313 588
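// Mix a sparse time-value model through its ContinuousSynth: for each
// whole processing block, find the last point that falls within the block
// and pass its value to the synth as the new target frequency (zero if
// the block contains no point, meaning "no change").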
Chris@313 589 size_t
Chris@313 590 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@313 591 size_t startFrame,
Chris@313 592 size_t frames,
Chris@313 593 float **buffer,
Chris@313 594 float gain,
Chris@313 595 float pan)
Chris@313 596 {
Chris@313 597 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 598 if (!synth) return 0;
Chris@313 599
Chris@313 600 // only type we support here at the moment
Chris@313 601 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 602 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 603
Chris@313 604 size_t blocks = frames / m_processingBlockSize;
Chris@313 605
Chris@313 606 //!!! todo: see comment in mixClipModel
Chris@313 607
Chris@313 608 size_t got = blocks * m_processingBlockSize;
Chris@313 609
Chris@313 610 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 611 cout << "mixModel [synth]: frames " << frames
Chris@313 612 << ", blocks " << blocks << endl;
Chris@313 613 #endif
Chris@313 614
Chris@313 615 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 616
Chris@313 617 for (size_t i = 0; i < blocks; ++i) {
Chris@313 618
Chris@313 619 size_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 620
Chris@313 621 for (size_t c = 0; c < m_targetChannelCount; ++c) {
Chris@313 622 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 623 }
Chris@313 624
Chris@313 625 SparseTimeValueModel::PointList points =
Chris@313 626 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 627
Chris@313 628 // default of zero means no new value in this block: the synth just repeats its last frequency
Chris@313 629 float f0 = 0.f;
Chris@313 630
Chris@313 631 // go straight to the last freq that is genuinely in this range
Chris@313 632 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 633 itr != points.begin(); ) {
Chris@313 634 --itr;
Chris@313 635 if (itr->frame >= reqStart &&
Chris@313 636 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 637 f0 = itr->value;
Chris@313 638 break;
Chris@313 639 }
Chris@313 640 }
Chris@313 641
Chris@313 642 cerr << "f0 = " << f0 << endl;
Chris@313 643
Chris@313 644 synth->mix(bufferIndexes,
Chris@313 645 gain,
Chris@313 646 pan,
Chris@313 647 f0);
Chris@313 648 }
Chris@313 649
Chris@313 650 delete[] bufferIndexes;
Chris@313 651
Chris@313 652 return got;
Chris@313 653 }
Chris@313 654
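// The ContinuousSynth class itself lives in ContinuousSynth.cpp and is not
// shown here. Purely as an illustrative sketch (the names and signature
// below are assumptions, not the actual implementation), a "simple
// sinusoids, no gaps" block renderer along the lines the commit message
// describes might look roughly like this:

#if 0
static void
mixSineBlock(float **buffers, int channels, int blockSize,
             float sampleRate, float gain, float pan, float f0,
             float &frequency, float &phase) // state kept across blocks
{
    if (f0 > 0.f) frequency = f0; // f0 of zero: keep the previous frequency
    if (frequency <= 0.f) return; // nothing to play yet

    for (int i = 0; i < blockSize; ++i) {

        float sample = gain * sinf(phase);
        phase += 2.f * float(M_PI) * frequency / sampleRate;

        for (int c = 0; c < channels; ++c) {
            // same pan convention as mixDenseTimeValueModel above
            float channelGain = 1.f;
            if (pan != 0.f) {
                if (c == 0) {
                    if (pan > 0.f) channelGain = 1.f - pan;
                } else {
                    if (pan < 0.f) channelGain = pan + 1.f;
                }
            }
            buffers[c][i] += channelGain * sample;
        }
    }
}
#endif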