annotate audioio/AudioGenerator.cpp @ 459:74d575708e06 alignment-simple

Branch to test simple FFT model code
author Chris Cannam
date Mon, 15 Jun 2015 09:15:55 +0100
parents 3485d324c172
children 49c89950b06d
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
Chris@436 40 const sv_frame_t
Chris@315 41 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
Chris@348 51 m_waveType(0),
Chris@382 52 m_soloing(false),
Chris@382 53 m_channelBuffer(0),
Chris@382 54 m_channelBufSiz(0),
Chris@382 55 m_channelBufCount(0)
Chris@43 56 {
Chris@108 57 initialiseSampleDir();
Chris@43 58
Chris@43 59 connect(PlayParameterRepository::getInstance(),
Chris@309 60 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 61 this,
Chris@309 62 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 63 }
Chris@43 64
Chris@43 65 AudioGenerator::~AudioGenerator()
Chris@43 66 {
Chris@177 67 #ifdef DEBUG_AUDIO_GENERATOR
Chris@233 68 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 69 #endif
Chris@43 70 }
Chris@43 71
Chris@108 72 void
Chris@108 73 AudioGenerator::initialiseSampleDir()
Chris@43 74 {
Chris@108 75 if (m_sampleDir != "") return;
Chris@108 76
Chris@108 77 try {
Chris@108 78 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 79 } catch (DirectoryCreationFailed f) {
Chris@293 80 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 81 << " Failed to create temporary sample directory"
Chris@293 82 << endl;
Chris@108 83 m_sampleDir = "";
Chris@108 84 return;
Chris@108 85 }
Chris@108 86
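// Copy the clip samples bundled as Qt resources (":/samples/*.wav") into
// the temporary directory, so that they can later be opened as ordinary
// files by path (see makeClipMixerFor).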
Chris@108 87 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 88
Chris@108 89 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 90
Chris@108 91 QString fileName(sampleResourceDir[i]);
Chris@108 92 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 93 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 94
Chris@151 95 if (!file.copy(target)) {
Chris@293 96 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 97 << "Unable to copy " << fileName
Chris@108 98 << " into temporary directory \""
Chris@293 99 << m_sampleDir << "\"" << endl;
Chris@151 100 } else {
Chris@151 101 QFile tf(target);
Chris@151 102 tf.setPermissions(tf.permissions() |
Chris@151 103 QFile::WriteOwner |
Chris@151 104 QFile::WriteUser);
Chris@108 105 }
Chris@43 106 }
Chris@43 107 }
Chris@43 108
Chris@43 109 bool
Chris@43 110 AudioGenerator::addModel(Model *model)
Chris@43 111 {
Chris@43 112 if (m_sourceSampleRate == 0) {
Chris@43 113
Chris@43 114 m_sourceSampleRate = model->getSampleRate();
Chris@43 115
Chris@43 116 } else {
Chris@43 117
Chris@43 118 DenseTimeValueModel *dtvm =
Chris@43 119 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 120
Chris@43 121 if (dtvm) {
Chris@43 122 m_sourceSampleRate = model->getSampleRate();
Chris@43 123 return true;
Chris@43 124 }
Chris@43 125 }
Chris@307 126
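// From this point we are dealing with a sparse model: decide whether it
// will actually be heard (it can play and is not muted), and set up a
// clip mixer or continuous synth for it as appropriate.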
Chris@418 127 const Playable *playable = model;
Chris@418 128 if (!playable || !playable->canPlay()) return false;
Chris@418 129
Chris@418 130 PlayParameters *parameters =
Chris@418 131 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@418 132
Chris@418 133 bool willPlay = !parameters->isPlayMuted();
Chris@418 134
Chris@313 135 if (usesClipMixer(model)) {
Chris@313 136 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 137 if (mixer) {
Chris@313 138 QMutexLocker locker(&m_mutex);
Chris@313 139 m_clipMixerMap[model] = mixer;
Chris@418 140 return willPlay;
Chris@313 141 }
Chris@313 142 }
Chris@313 143
Chris@313 144 if (usesContinuousSynth(model)) {
Chris@313 145 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 146 if (synth) {
Chris@313 147 QMutexLocker locker(&m_mutex);
Chris@313 148 m_continuousSynthMap[model] = synth;
Chris@418 149 return willPlay;
Chris@313 150 }
Chris@43 151 }
Chris@307 152
Chris@43 153 return false;
Chris@43 154 }
Chris@43 155
Chris@43 156 void
Chris@309 157 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 158 {
Chris@108 159 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 160 if (!model) {
Chris@309 161 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 162 << playable << " is not a supported model type"
Chris@293 163 << endl;
Chris@108 164 return;
Chris@108 165 }
Chris@108 166
Chris@307 167 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 168
Chris@307 169 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 170 if (mixer) {
Chris@43 171 QMutexLocker locker(&m_mutex);
Chris@307 172 m_clipMixerMap[model] = mixer;
Chris@43 173 }
Chris@43 174 }
Chris@308 175
Chris@313 176 bool
Chris@313 177 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 178 {
Chris@313 179 bool clip =
Chris@313 180 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 181 qobject_cast<const NoteModel *>(model) ||
Chris@313 182 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 183 return clip;
Chris@43 184 }
Chris@43 185
Chris@313 186 bool
Chris@349 187 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 188 {
Chris@349 189 // basically, anything that usually has sustain (like notes) or
Chris@349 190 // often has multiple sounds at once wants to use a
Chris@349 191 // quieter level than simple click tracks
Chris@349 192 bool does =
Chris@349 193 (qobject_cast<const NoteModel *>(model) ||
Chris@349 194 qobject_cast<const FlexiNoteModel *>(model));
Chris@349 195 return does;
Chris@349 196 }
Chris@349 197
Chris@349 198 bool
Chris@313 199 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 200 {
Chris@313 201 bool cont =
Chris@313 202 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 203 return cont;
Chris@313 204 }
Chris@313 205
Chris@307 206 ClipMixer *
Chris@307 207 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 208 {
Chris@309 209 QString clipId;
Chris@43 210
Chris@108 211 const Playable *playable = model;
Chris@108 212 if (!playable || !playable->canPlay()) return 0;
Chris@108 213
Chris@43 214 PlayParameters *parameters =
Chris@108 215 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 216 if (parameters) {
Chris@309 217 clipId = parameters->getPlayClipId();
Chris@43 218 }
Chris@43 219
Chris@445 220 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 221 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@445 222 #endif
Chris@276 223
Chris@309 224 if (clipId == "") {
Chris@308 225 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 226 return 0;
Chris@276 227 }
Chris@43 228
Chris@308 229 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 230 m_sourceSampleRate,
Chris@308 231 m_processingBlockSize);
Chris@307 232
Chris@436 233 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // reference pitch for the clip (MIDI note 60, i.e. middle C)
Chris@307 234
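// The clip files were copied into m_sampleDir from the bundled
// resources by initialiseSampleDir().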
Chris@309 235 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 236
Chris@436 237 double level = wantsQuieterClips(model) ? 0.5 : 1.0;
Chris@349 238 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 239 delete mixer;
Chris@43 240 return 0;
Chris@43 241 }
Chris@43 242
Chris@445 243 #ifdef DEBUG_AUDIO_GENERATOR
Chris@309 244 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@445 245 #endif
Chris@43 246
Chris@308 247 return mixer;
Chris@308 248 }
Chris@43 249
Chris@313 250 ContinuousSynth *
Chris@313 251 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 252 {
Chris@313 253 const Playable *playable = model;
Chris@313 254 if (!playable || !playable->canPlay()) return 0;
Chris@313 255
Chris@313 256 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 257 m_sourceSampleRate,
rmb456@323 258 m_processingBlockSize,
rmb456@323 259 m_waveType);
Chris@313 260
Chris@445 261 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 262 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@445 263 #endif
Chris@313 264
Chris@313 265 return synth;
Chris@313 266 }
Chris@313 267
Chris@43 268 void
Chris@43 269 AudioGenerator::removeModel(Model *model)
Chris@43 270 {
Chris@43 271 SparseOneDimensionalModel *sodm =
Chris@43 272 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 273 if (!sodm) return; // nothing to do
Chris@43 274
Chris@43 275 QMutexLocker locker(&m_mutex);
Chris@43 276
Chris@308 277 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
Chris@43 278
Chris@308 279 ClipMixer *mixer = m_clipMixerMap[sodm];
Chris@308 280 m_clipMixerMap.erase(sodm);
Chris@308 281 delete mixer;
Chris@43 282 }
Chris@43 283
Chris@43 284 void
Chris@43 285 AudioGenerator::clearModels()
Chris@43 286 {
Chris@43 287 QMutexLocker locker(&m_mutex);
Chris@308 288
Chris@308 289 while (!m_clipMixerMap.empty()) {
Chris@308 290 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@308 291 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@308 292 delete mixer;
Chris@43 293 }
Chris@43 294 }
Chris@43 295
Chris@43 296 void
Chris@43 297 AudioGenerator::reset()
Chris@43 298 {
Chris@43 299 QMutexLocker locker(&m_mutex);
Chris@308 300
Chris@445 301 #ifdef DEBUG_AUDIO_GENERATOR
Chris@397 302 cerr << "AudioGenerator::reset()" << endl;
Chris@445 303 #endif
Chris@397 304
Chris@308 305 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@43 306 if (i->second) {
Chris@308 307 i->second->reset();
Chris@43 308 }
Chris@43 309 }
Chris@43 310
Chris@43 311 m_noteOffs.clear();
Chris@43 312 }
Chris@43 313
Chris@43 314 void
Chris@366 315 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 316 {
Chris@43 317 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 318
Chris@233 319 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 320
Chris@43 321 QMutexLocker locker(&m_mutex);
Chris@43 322 m_targetChannelCount = targetChannelCount;
Chris@43 323
Chris@308 324 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@308 325 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 326 }
Chris@43 327 }
Chris@43 328
Chris@436 329 sv_frame_t
Chris@43 330 AudioGenerator::getBlockSize() const
Chris@43 331 {
Chris@305 332 return m_processingBlockSize;
Chris@43 333 }
Chris@43 334
Chris@43 335 void
Chris@43 336 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 337 {
Chris@43 338 QMutexLocker locker(&m_mutex);
Chris@43 339
Chris@43 340 m_soloModelSet = s;
Chris@43 341 m_soloing = true;
Chris@43 342 }
Chris@43 343
Chris@43 344 void
Chris@43 345 AudioGenerator::clearSoloModelSet()
Chris@43 346 {
Chris@43 347 QMutexLocker locker(&m_mutex);
Chris@43 348
Chris@43 349 m_soloModelSet.clear();
Chris@43 350 m_soloing = false;
Chris@43 351 }
Chris@43 352
Chris@436 353 sv_frame_t
Chris@436 354 AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
Chris@436 355 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 356 {
Chris@43 357 if (m_sourceSampleRate == 0) {
Chris@293 358 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@43 359 return frameCount;
Chris@43 360 }
Chris@43 361
Chris@43 362 QMutexLocker locker(&m_mutex);
Chris@43 363
Chris@108 364 Playable *playable = model;
Chris@108 365 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 366
Chris@43 367 PlayParameters *parameters =
Chris@108 368 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 369 if (!parameters) return frameCount;
Chris@43 370
Chris@43 371 bool playing = !parameters->isPlayMuted();
Chris@43 372 if (!playing) {
Chris@43 373 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 374 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 375 #endif
Chris@43 376 return frameCount;
Chris@43 377 }
Chris@43 378
Chris@43 379 if (m_soloing) {
Chris@43 380 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 381 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 382 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 383 #endif
Chris@43 384 return frameCount;
Chris@43 385 }
Chris@43 386 }
Chris@43 387
Chris@43 388 float gain = parameters->getPlayGain();
Chris@43 389 float pan = parameters->getPlayPan();
Chris@43 390
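// Dispatch on model type: dense audio data is mixed directly below,
// while sparse models go through the clip mixer or continuous synth
// set up in addModel().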
Chris@43 391 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 392 if (dtvm) {
Chris@43 393 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@43 394 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 395 }
Chris@43 396
Chris@313 397 if (usesClipMixer(model)) {
Chris@313 398 return mixClipModel(model, startFrame, frameCount,
Chris@313 399 buffer, gain, pan);
Chris@313 400 }
Chris@43 401
Chris@313 402 if (usesContinuousSynth(model)) {
Chris@313 403 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 404 buffer, gain, pan);
Chris@43 405 }
Chris@43 406
Chris@276 407 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 408
Chris@43 409 return frameCount;
Chris@43 410 }
Chris@43 411
Chris@436 412 sv_frame_t
Chris@43 413 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@436 414 sv_frame_t startFrame, sv_frame_t frames,
Chris@43 415 float **buffer, float gain, float pan,
Chris@436 416 sv_frame_t fadeIn, sv_frame_t fadeOut)
Chris@43 417 {
Chris@436 418 sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
Chris@43 419
Chris@366 420 int modelChannels = dtvm->getChannelCount();
Chris@80 421
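// (Re)allocate the cached per-channel scratch buffers if this request
// needs more frames or more channels than we currently have.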
Chris@382 422 if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
Chris@80 423
Chris@382 424 for (int c = 0; c < m_channelBufCount; ++c) {
Chris@382 425 delete[] m_channelBuffer[c];
Chris@80 426 }
Chris@80 427
Chris@382 428 delete[] m_channelBuffer;
Chris@382 429 m_channelBuffer = new float *[modelChannels];
Chris@80 430
Chris@366 431 for (int c = 0; c < modelChannels; ++c) {
Chris@382 432 m_channelBuffer[c] = new float[maxFrames];
Chris@80 433 }
Chris@80 434
Chris@382 435 m_channelBufCount = modelChannels;
Chris@382 436 m_channelBufSiz = maxFrames;
Chris@43 437 }
Chris@80 438
Chris@436 439 sv_frame_t got = 0;
Chris@80 440
Chris@80 441 if (startFrame >= fadeIn/2) {
Chris@457 442 got = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@457 443 startFrame - fadeIn/2,
Chris@457 444 frames + fadeOut/2 + fadeIn/2,
Chris@457 445 m_channelBuffer);
Chris@80 446 } else {
Chris@436 447 sv_frame_t missing = fadeIn/2 - startFrame;
Chris@80 448
Chris@366 449 for (int c = 0; c < modelChannels; ++c) {
Chris@382 450 m_channelBuffer[c] += missing;
Chris@382 451 }
Chris@382 452
Chris@382 453 if (missing > 0) {
Chris@382 454 cerr << "note: channelBufSiz = " << m_channelBufSiz
Chris@382 455 << ", frames + fadeOut/2 = " << frames + fadeOut/2
Chris@382 456 << ", startFrame = " << startFrame
Chris@382 457 << ", missing = " << missing << endl;
Chris@80 458 }
Chris@80 459
Chris@457 460 got = dtvm->getMultiChannelData(0, modelChannels - 1,
Chris@457 461 startFrame,
Chris@457 462 frames + fadeOut/2,
Chris@457 463 m_channelBuffer);
Chris@80 464
Chris@366 465 for (int c = 0; c < modelChannels; ++c) {
Chris@382 466 m_channelBuffer[c] -= missing;
Chris@80 467 }
Chris@80 468
Chris@80 469 got += missing;
Chris@80 470 }
Chris@43 471
Chris@366 472 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@43 473
Chris@366 474 int sourceChannel = (c % modelChannels);
Chris@43 475
Chris@233 476 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 477
Chris@43 478 float channelGain = gain;
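// Simple linear pan in [-1, 1]: positive values attenuate channel 0
// (left), negative values attenuate the other channel(s).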
Chris@43 479 if (pan != 0.0) {
Chris@43 480 if (c == 0) {
Chris@436 481 if (pan > 0.0) channelGain *= 1.0f - pan;
Chris@43 482 } else {
Chris@436 483 if (pan < 0.0) channelGain *= pan + 1.0f;
Chris@43 484 }
Chris@43 485 }
Chris@43 486
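// The first half of the fade-in is mixed into the frames immediately
// preceding buffer[c]; the caller is expected to provide valid space
// there whenever it requests a fade-in.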
Chris@436 487 for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
Chris@43 488 float *back = buffer[c];
Chris@43 489 back -= fadeIn/2;
Chris@436 490 back[i] +=
Chris@436 491 (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
Chris@436 492 / float(fadeIn);
Chris@43 493 }
Chris@43 494
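// Main mix loop: apply the fade-in ramp over the first fadeIn/2 frames
// and the fade-out ramp over the last fadeOut frames of the range, and
// treat anything beyond what the model actually returned as silence.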
Chris@436 495 for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
Chris@43 496 float mult = channelGain;
Chris@43 497 if (i < fadeIn/2) {
Chris@436 498 mult = (mult * float(i)) / float(fadeIn);
Chris@43 499 }
Chris@43 500 if (i > frames - fadeOut/2) {
Chris@436 501 mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
Chris@43 502 }
Chris@382 503 float val = m_channelBuffer[sourceChannel][i];
Chris@80 504 if (i >= got) val = 0.f;
Chris@80 505 buffer[c][i] += mult * val;
Chris@43 506 }
Chris@43 507 }
Chris@43 508
Chris@43 509 return got;
Chris@43 510 }
Chris@43 511
Chris@436 512 sv_frame_t
Chris@313 513 AudioGenerator::mixClipModel(Model *model,
Chris@436 514 sv_frame_t startFrame, sv_frame_t frames,
Chris@313 515 float **buffer, float gain, float pan)
Chris@43 516 {
Chris@308 517 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 518 if (!clipMixer) return 0;
Chris@43 519
Chris@436 520 int blocks = int(frames / m_processingBlockSize);
Chris@43 521
Chris@313 522 //!!! todo: the below -- it matters
Chris@313 523
Chris@43 524 //!!! hang on -- the fact that the audio callback play source's
Chris@43 525 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 526 //that we always get called for a multiple of it here (because it
Chris@43 527 //also depends on the JACK block size). how should we ensure that
Chris@43 528 //all models write the same amount into the mix, and that we
Chris@43 529 //always have a multiple of the plugin buffer size? I guess this
Chris@43 530 //class has to be queryable for the plugin buffer size & the
Chris@43 531 //callback play source has to use that as a multiple for all the
Chris@43 532 //calls to mixModel
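// A caller that wanted to guarantee this today could round its request
// down to a whole number of blocks before calling mixModel, e.g.
// (hypothetical caller code):
//     sv_frame_t block = generator->getBlockSize();
//     sv_frame_t usable = (frameCount / block) * block;
// as things stand, any remainder smaller than one block is silently
// dropped by the whole-block truncation above.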
Chris@43 533
Chris@436 534 sv_frame_t got = blocks * m_processingBlockSize;
Chris@43 535
Chris@43 536 #ifdef DEBUG_AUDIO_GENERATOR
Chris@442 537 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
Chris@442 538 << ", blocks " << blocks << ", have " << m_noteOffs.size()
Chris@442 539 << " note-offs" << endl;
Chris@43 540 #endif
Chris@43 541
Chris@308 542 ClipMixer::NoteStart on;
Chris@308 543 ClipMixer::NoteEnd off;
Chris@43 544
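// Note-offs are stored per model and persist across calls, so a note
// whose duration extends beyond the frames mixed here is terminated in
// a later call.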
Chris@275 545 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 546
Chris@308 547 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 548
Chris@366 549 for (int i = 0; i < blocks; ++i) {
Chris@43 550
Chris@436 551 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@43 552
Chris@299 553 NoteList notes;
Chris@299 554 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 555 if (exportable) {
Chris@366 556 notes = exportable->getNotesWithin(reqStart,
Chris@366 557 reqStart + m_processingBlockSize);
Chris@299 558 }
Chris@43 559
Chris@308 560 std::vector<ClipMixer::NoteStart> starts;
Chris@308 561 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 562
Chris@275 563 for (NoteList::const_iterator ni = notes.begin();
Chris@275 564 ni != notes.end(); ++ni) {
Chris@43 565
Chris@436 566 sv_frame_t noteFrame = ni->start;
Chris@43 567
Chris@275 568 if (noteFrame < reqStart ||
Chris@305 569 noteFrame >= reqStart + m_processingBlockSize) continue;
Chris@43 570
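// First emit any already-scheduled note-offs that fall at or before
// this note's start.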
Chris@43 571 while (noteOffs.begin() != noteOffs.end() &&
Chris@275 572 noteOffs.begin()->frame <= noteFrame) {
Chris@43 573
Chris@436 574 sv_frame_t eventFrame = noteOffs.begin()->frame;
Chris@308 575 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 576
Chris@308 577 off.frameOffset = eventFrame - reqStart;
Chris@308 578 off.frequency = noteOffs.begin()->frequency;
Chris@43 579
Chris@43 580 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 581 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 582 #endif
Chris@43 583
Chris@308 584 ends.push_back(off);
Chris@43 585 noteOffs.erase(noteOffs.begin());
Chris@43 586 }
Chris@43 587
Chris@308 588 on.frameOffset = noteFrame - reqStart;
Chris@308 589 on.frequency = ni->getFrequency();
Chris@436 590 on.level = float(ni->velocity) / 127.0f;
Chris@308 591 on.pan = pan;
Chris@43 592
Chris@43 593 #ifdef DEBUG_AUDIO_GENERATOR
Chris@346 594 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
Chris@43 595 #endif
Chris@43 596
Chris@308 597 starts.push_back(on);
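// Schedule the matching note-off for when this note's duration elapses.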
Chris@275 598 noteOffs.insert
Chris@308 599 (NoteOff(on.frequency, noteFrame + ni->duration));
Chris@43 600 }
Chris@43 601
Chris@43 602 while (noteOffs.begin() != noteOffs.end() &&
Chris@308 603 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
Chris@43 604
Chris@436 605 sv_frame_t eventFrame = noteOffs.begin()->frame;
Chris@308 606 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 607
Chris@308 608 off.frameOffset = eventFrame - reqStart;
Chris@308 609 off.frequency = noteOffs.begin()->frequency;
Chris@43 610
Chris@43 611 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 612 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 613 #endif
Chris@43 614
Chris@308 615 ends.push_back(off);
Chris@308 616 noteOffs.erase(noteOffs.begin());
Chris@43 617 }
Chris@43 618
Chris@366 619 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@308 620 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 621 }
Chris@43 622
Chris@308 623 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 624 }
Chris@43 625
Chris@308 626 delete[] bufferIndexes;
Chris@43 627
Chris@43 628 return got;
Chris@43 629 }
Chris@313 630
Chris@436 631 sv_frame_t
Chris@313 632 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@436 633 sv_frame_t startFrame,
Chris@436 634 sv_frame_t frames,
Chris@313 635 float **buffer,
Chris@313 636 float gain,
Chris@313 637 float pan)
Chris@313 638 {
Chris@313 639 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 640 if (!synth) return 0;
Chris@313 641
Chris@313 642 // only type we support here at the moment
Chris@313 643 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 644 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 645
Chris@436 646 int blocks = int(frames / m_processingBlockSize);
Chris@313 647
Chris@313 648 //!!! todo: see comment in mixClipModel
Chris@313 649
Chris@436 650 sv_frame_t got = blocks * m_processingBlockSize;
Chris@313 651
Chris@313 652 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 653 cout << "mixModel [synth]: frames " << frames
Chris@313 654 << ", blocks " << blocks << endl;
Chris@313 655 #endif
Chris@313 656
Chris@313 657 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 658
Chris@366 659 for (int i = 0; i < blocks; ++i) {
Chris@313 660
Chris@436 661 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
Chris@313 662
Chris@366 663 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 664 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 665 }
Chris@313 666
Chris@313 667 SparseTimeValueModel::PointList points =
Chris@313 668 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 669
Chris@313 670 // by default, repeat last frequency
Chris@313 671 float f0 = 0.f;
Chris@313 672
Chris@313 673 // go straight to the last freq that is genuinely in this range
Chris@313 674 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 675 itr != points.begin(); ) {
Chris@313 676 --itr;
Chris@313 677 if (itr->frame >= reqStart &&
Chris@313 678 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 679 f0 = itr->value;
Chris@313 680 break;
Chris@313 681 }
Chris@313 682 }
Chris@313 683
Chris@314 684 // if we found no such frequency and the next point is further
Chris@314 685 // away than twice the model resolution, go silent (same
Chris@314 686 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 687 // segment)
Chris@314 688 if (f0 == 0.f) {
Chris@314 689 SparseTimeValueModel::PointList nextPoints =
Chris@314 690 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 691 if (nextPoints.empty() ||
Chris@314 692 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 693 f0 = -1.f;
Chris@314 694 }
Chris@314 695 }
Chris@314 696
Chris@315 697 // cerr << "f0 = " << f0 << endl;
Chris@313 698
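// Per the comments above: f0 == 0.f asks the synth to hold its previous
// frequency, f0 == -1.f asks it to go silent.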
Chris@313 699 synth->mix(bufferIndexes,
Chris@313 700 gain,
Chris@313 701 pan,
Chris@313 702 f0);
Chris@313 703 }
Chris@313 704
Chris@313 705 delete[] bufferIndexes;
Chris@313 706
Chris@313 707 return got;
Chris@313 708 }
Chris@313 709