annotate audioio/AudioGenerator.cpp @ 403:eb84b06301da

Restore the old prev/next layer commands (that were never enabled because they didn't work) using the new fixed order layer list (so they now do work)
author Chris Cannam
date Tue, 02 Sep 2014 16:06:41 +0100
parents f747be6743ab
children 8d2112977aa0 72c662fe7ea3
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version. See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "data/model/NoteModel.h"
#include "data/model/FlexiNoteModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/SparseTimeValueModel.h"
#include "data/model/SparseOneDimensionalModel.h"
#include "data/model/NoteData.h"

#include "ClipMixer.h"
#include "ContinuousSynth.h"

#include <iostream>
#include <cmath>
#include <vector>    // for std::vector
#include <algorithm> // for std::max

#include <QDir>
#include <QFile>

const int
AudioGenerator::m_processingBlockSize = 1024;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    initialiseSampleDir();

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}

AudioGenerator::~AudioGenerator()
{
#ifdef DEBUG_AUDIO_GENERATOR
    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
#endif
}

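// Copy the bundled clip resources (":/samples/*.wav") out of the Qt
// resource bundle into a writable temporary directory, so that they
// exist as ordinary files on disc for the clip mixers to load from
// m_sampleDir.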
void
AudioGenerator::initialiseSampleDir()
{
    if (m_sampleDir != "") return;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (const DirectoryCreationFailed &f) {
        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
             << " Failed to create temporary sample directory"
             << endl;
        m_sampleDir = "";
        return;
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));
        QString target = QDir(m_sampleDir).filePath(fileName);

        if (!file.copy(target)) {
            cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
                 << "Unable to copy " << fileName
                 << " into temporary directory \""
                 << m_sampleDir << "\"" << endl;
        } else {
            QFile tf(target);
            tf.setPermissions(tf.permissions() |
                              QFile::WriteOwner |
                              QFile::WriteUser);
        }
    }
}

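// Register a model for playback, creating a ClipMixer for sparse
// one-dimensional and note models and a ContinuousSynth for sparse
// time-value models. The first model added (or any subsequent dense
// time-value model) also establishes the source sample rate.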
bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    if (usesClipMixer(model)) {
        ClipMixer *mixer = makeClipMixerFor(model);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[model] = mixer;
            return true;
        }
    }

    if (usesContinuousSynth(model)) {
        ContinuousSynth *synth = makeSynthFor(model);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[model] = synth;
            return true;
        }
    }

    return false;
}

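// Slot invoked when the playback clip selected for a playable
// changes. If we already hold a ClipMixer for the corresponding
// model, rebuild it so that subsequent mixing uses the new clip.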
void
AudioGenerator::playClipIdChanged(const Playable *playable, QString)
{
    const Model *model = dynamic_cast<const Model *>(playable);
    if (!model) {
        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
             << playable << " is not a supported model type"
             << endl;
        return;
    }

    if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;

    ClipMixer *mixer = makeClipMixerFor(model);
    if (mixer) {
        QMutexLocker locker(&m_mutex);
        ClipMixer *oldMixer = m_clipMixerMap[model];
        m_clipMixerMap[model] = mixer;
        delete oldMixer; // don't leak the mixer built for the previous clip
    }
}

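// Models whose events are played by triggering a short sample clip
// per note or instant, rather than by continuous synthesis.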
bool
AudioGenerator::usesClipMixer(const Model *model)
{
    bool clip =
        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
         qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return clip;
}

bool
AudioGenerator::wantsQuieterClips(const Model *model)
{
    // Anything that usually has sustain, or that often plays several
    // sounds at once (i.e. note models), wants a quieter level than a
    // simple click track does
    bool does =
        (qobject_cast<const NoteModel *>(model) ||
         qobject_cast<const FlexiNoteModel *>(model));
    return does;
}

bool
AudioGenerator::usesContinuousSynth(const Model *model)
{
    bool cont =
        (qobject_cast<const SparseTimeValueModel *>(model));
    return cont;
}

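// Create a ClipMixer for the given model, loading the clip named in
// its play parameters from the sample directory. Returns 0 if the
// model is not playable, has no clip selected, or the clip data
// cannot be loaded.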
ClipMixer *
AudioGenerator::makeClipMixerFor(const Model *model)
{
    QString clipId;

    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (parameters) {
        clipId = parameters->getPlayClipId();
    }

    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;

    if (clipId == "") {
        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
        return 0;
    }

    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
                                     m_sourceSampleRate,
                                     m_processingBlockSize);

    // reference frequency for the clip: MIDI note 60 (middle C) at A = 440
    float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f);

    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);

    float level = wantsQuieterClips(model) ? 0.5f : 1.0f;
    if (!mixer->loadClipData(clipPath, clipF0, level)) {
        delete mixer;
        return 0;
    }

    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;

    return mixer;
}

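// Create a ContinuousSynth for the given model, matching our current
// channel count, sample rate, processing block size and wave type.
// Returns 0 if the model is not playable.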
ContinuousSynth *
AudioGenerator::makeSynthFor(const Model *model)
{
    const Playable *playable = model;
    if (!playable || !playable->canPlay()) return 0;

    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
                                                 m_sourceSampleRate,
                                                 m_processingBlockSize,
                                                 m_waveType);

    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;

    return synth;
}

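// Discard the clip mixer or synth previously created for this model,
// if any.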
void
AudioGenerator::removeModel(Model *model)
{
    QMutexLocker locker(&m_mutex);

    if (m_clipMixerMap.find(model) != m_clipMixerMap.end()) {
        ClipMixer *mixer = m_clipMixerMap[model];
        m_clipMixerMap.erase(model);
        delete mixer;
    }

    if (m_continuousSynthMap.find(model) != m_continuousSynthMap.end()) {
        ContinuousSynth *synth = m_continuousSynthMap[model];
        m_continuousSynthMap.erase(model);
        delete synth;
    }
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);

    while (!m_clipMixerMap.empty()) {
        ClipMixer *mixer = m_clipMixerMap.begin()->second;
        m_clipMixerMap.erase(m_clipMixerMap.begin());
        delete mixer;
    }

    while (!m_continuousSynthMap.empty()) {
        ContinuousSynth *synth = m_continuousSynthMap.begin()->second;
        m_continuousSynthMap.erase(m_continuousSynthMap.begin());
        delete synth;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);

    cerr << "AudioGenerator::reset()" << endl;

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) {
            i->second->reset();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(int targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

    // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) i->second->setChannelCount(targetChannelCount);
    }
}

int
AudioGenerator::getBlockSize() const
{
    return m_processingBlockSize;
}

void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}

void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}

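// Mix frameCount frames of this model's playback audio, starting at
// startFrame, into the supplied buffers (one per target channel).
// Muted models, and models outside the solo set while soloing, are
// skipped; dense waveform models are mixed directly from their sample
// data, and sparse models are dispatched to the clip mixer or
// continuous synth created for them in addModel.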
int
AudioGenerator::mixModel(Model *model, int startFrame, int frameCount,
                         float **buffer, int fadeIn, int fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}

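// Mix a waveform model directly from its sample data, cycling its
// channels across the target channels and applying gain, pan and
// linear fades. The fades straddle the block boundaries: the fade-in
// writes up to fadeIn/2 frames before the start of buffer (so the
// caller is expected to provide that much headroom), and the fade-out
// runs up to fadeOut/2 frames past the requested frame count.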
int
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       int startFrame, int frames,
                                       float **buffer, float gain, float pan,
                                       int fadeIn, int fadeOut)
{
    int maxFrames = frames + std::max(fadeIn, fadeOut);

    int modelChannels = dtvm->getChannelCount();

    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    int got = 0;

    if (startFrame >= fadeIn/2) {
        got = dtvm->getData(0, modelChannels - 1,
                            startFrame - fadeIn/2,
                            frames + fadeOut/2 + fadeIn/2,
                            m_channelBuffer);
    } else {
        int missing = fadeIn/2 - startFrame;

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] += missing;
        }

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        got = dtvm->getData(0, modelChannels - 1,
                            startFrame,
                            frames + fadeOut/2,
                            m_channelBuffer);

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] -= missing;
        }

        got += missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        for (int i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn;
        }

        for (int i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}

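// Mix a note-like model by triggering its clip sample once per event.
// The requested range is processed in m_processingBlockSize chunks;
// note starts are gathered from the model's NoteExportable interface,
// and pending note-offs are carried across chunks in m_noteOffs so
// that each clip is released at the end of its note's duration.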
int
AudioGenerator::mixClipModel(Model *model,
                             int startFrame, int frames,
                             float **buffer, float gain, float pan)
{
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    int blocks = frames / m_processingBlockSize;

    //!!! todo: the below -- it matters

    //!!! hang on -- the fact that the audio callback play source's
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size). how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size? I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    int got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    NoteOffSet &noteOffs = m_noteOffs[model];

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        int reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            int noteFrame = ni->start;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                int eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif

            starts.push_back(on);
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

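        // Also flush any remaining note-offs that fall due before the
        // end of this block, even if no further note starts within it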
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            int eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}

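// Mix a sparse time-value model (with values in Hz) through the
// continuous synth. For each processing block we pick the last
// frequency value that genuinely falls within the block; f0 is left
// at 0 to make the synth repeat its previous frequency, or set to -1
// to make it fall silent when the next point is more than twice the
// model resolution away.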
int
AudioGenerator::mixContinuousSynthModel(Model *model,
                                        int startFrame,
                                        int frames,
                                        float **buffer,
                                        float gain,
                                        float pan)
{
    ContinuousSynth *synth = m_continuousSynthMap[model];
    if (!synth) return 0;

    // only type we support here at the moment
    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
    if (!stvm || stvm->getScaleUnits() != "Hz") return 0;

    int blocks = frames / m_processingBlockSize;

    //!!! todo: see comment in mixClipModel

    int got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [synth]: frames " << frames
         << ", blocks " << blocks << endl;
#endif

    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        int reqStart = startFrame + i * m_processingBlockSize;

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        SparseTimeValueModel::PointList points =
            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);

        // by default, repeat last frequency
        float f0 = 0.f;

        // go straight to the last freq that is genuinely in this range
        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
             itr != points.begin(); ) {
            --itr;
            if (itr->frame >= reqStart &&
                itr->frame < reqStart + m_processingBlockSize) {
                f0 = itr->value;
                break;
            }
        }

        // if we found no such frequency and the next point is further
        // away than twice the model resolution, go silent (same
        // criterion TimeValueLayer uses for ending a discrete curve
        // segment)
        if (f0 == 0.f) {
            SparseTimeValueModel::PointList nextPoints =
                stvm->getNextPoints(reqStart + m_processingBlockSize);
            if (nextPoints.empty() ||
                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
                f0 = -1.f;
            }
        }

        // cerr << "f0 = " << f0 << endl;

        synth->mix(bufferIndexes,
                   gain,
                   pan,
                   f0);
    }

    delete[] bufferIndexes;

    return got;
}