annotate audioio/AudioGenerator.cpp @ 366:0876ea394902 warnfix_no_size_t

Remove size_t's, fix compiler warnings
author Chris Cannam
date Tue, 17 Jun 2014 16:23:06 +0100
parents 8d7f39df44ed
children 2484e6f95c06
rev   line source
Chris@43 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
Chris@43 2
Chris@43 3 /*
Chris@43 4 Sonic Visualiser
Chris@43 5 An audio file viewer and annotation editor.
Chris@43 6 Centre for Digital Music, Queen Mary, University of London.
Chris@43 7 This file copyright 2006 Chris Cannam.
Chris@43 8
Chris@43 9 This program is free software; you can redistribute it and/or
Chris@43 10 modify it under the terms of the GNU General Public License as
Chris@43 11 published by the Free Software Foundation; either version 2 of the
Chris@43 12 License, or (at your option) any later version. See the file
Chris@43 13 COPYING included with this distribution for more information.
Chris@43 14 */
Chris@43 15
Chris@43 16 #include "AudioGenerator.h"
Chris@43 17
Chris@43 18 #include "base/TempDirectory.h"
Chris@43 19 #include "base/PlayParameters.h"
Chris@43 20 #include "base/PlayParameterRepository.h"
Chris@43 21 #include "base/Pitch.h"
Chris@43 22 #include "base/Exceptions.h"
Chris@43 23
Chris@43 24 #include "data/model/NoteModel.h"
Chris@278 25 #include "data/model/FlexiNoteModel.h"
Chris@43 26 #include "data/model/DenseTimeValueModel.h"
Chris@313 27 #include "data/model/SparseTimeValueModel.h"
Chris@43 28 #include "data/model/SparseOneDimensionalModel.h"
Chris@299 29 #include "data/model/NoteData.h"
Chris@43 30
Chris@307 31 #include "ClipMixer.h"
Chris@313 32 #include "ContinuousSynth.h"
Chris@307 33
Chris@43 34 #include <iostream>
Chris@167 35 #include <cmath>
Chris@43 36
Chris@43 37 #include <QDir>
Chris@43 38 #include <QFile>
Chris@43 39
Chris@366 40 const int
Chris@315 41 AudioGenerator::m_processingBlockSize = 1024;
Chris@43 42
Chris@43 43 QString
Chris@43 44 AudioGenerator::m_sampleDir = "";
Chris@43 45
Chris@43 46 //#define DEBUG_AUDIO_GENERATOR 1
Chris@43 47
Chris@43 48 AudioGenerator::AudioGenerator() :
Chris@43 49 m_sourceSampleRate(0),
Chris@43 50 m_targetChannelCount(1),
Chris@348 51 m_waveType(0),
Chris@43 52 m_soloing(false)
Chris@43 53 {
Chris@108 54 initialiseSampleDir();
Chris@43 55
Chris@43 56 connect(PlayParameterRepository::getInstance(),
Chris@309 57 SIGNAL(playClipIdChanged(const Playable *, QString)),
Chris@43 58 this,
Chris@309 59 SLOT(playClipIdChanged(const Playable *, QString)));
Chris@43 60 }
Chris@43 61
Chris@43 62 AudioGenerator::~AudioGenerator()
Chris@43 63 {
Chris@177 64 #ifdef DEBUG_AUDIO_GENERATOR
Chris@233 65 SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
Chris@177 66 #endif
Chris@43 67 }
Chris@43 68
Chris@108 69 void
Chris@108 70 AudioGenerator::initialiseSampleDir()
Chris@43 71 {
Chris@108 72 if (m_sampleDir != "") return;
Chris@108 73
Chris@108 74 try {
Chris@108 75 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
Chris@108 76 } catch (const DirectoryCreationFailed &) {
Chris@293 77 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
Chris@108 78 << " Failed to create temporary sample directory"
Chris@293 79 << endl;
Chris@108 80 m_sampleDir = "";
Chris@108 81 return;
Chris@108 82 }
Chris@108 83
Chris@108 84 QDir sampleResourceDir(":/samples", "*.wav");
Chris@108 85
Chris@108 86 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
Chris@108 87
Chris@108 88 QString fileName(sampleResourceDir[i]);
Chris@108 89 QFile file(sampleResourceDir.filePath(fileName));
Chris@151 90 QString target = QDir(m_sampleDir).filePath(fileName);
Chris@108 91
Chris@151 92 if (!file.copy(target)) {
Chris@293 93 cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
Chris@294 94 << "Unable to copy " << fileName
Chris@108 95 << " into temporary directory \""
Chris@293 96 << m_sampleDir << "\"" << endl;
Chris@151 97 } else {
Chris@151 98 QFile tf(target);
Chris@151 99 tf.setPermissions(tf.permissions() |
Chris@151 100 QFile::WriteOwner |
Chris@151 101 QFile::WriteUser);
Chris@108 102 }
Chris@43 103 }
Chris@43 104 }
Chris@43 105
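// Register a model for playback: record its sample rate where needed, and
// create a ClipMixer or ContinuousSynth for playable sparse models.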
Chris@43 106 bool
Chris@43 107 AudioGenerator::addModel(Model *model)
Chris@43 108 {
Chris@43 109 if (m_sourceSampleRate == 0) {
Chris@43 110
Chris@43 111 m_sourceSampleRate = model->getSampleRate();
Chris@43 112
Chris@43 113 } else {
Chris@43 114
Chris@43 115 DenseTimeValueModel *dtvm =
Chris@43 116 dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 117
Chris@43 118 if (dtvm) {
Chris@43 119 m_sourceSampleRate = model->getSampleRate();
Chris@43 120 return true;
Chris@43 121 }
Chris@43 122 }
Chris@307 123
Chris@313 124 if (usesClipMixer(model)) {
Chris@313 125 ClipMixer *mixer = makeClipMixerFor(model);
Chris@313 126 if (mixer) {
Chris@313 127 QMutexLocker locker(&m_mutex);
Chris@313 128 m_clipMixerMap[model] = mixer;
Chris@313 129 return true;
Chris@313 130 }
Chris@313 131 }
Chris@313 132
Chris@313 133 if (usesContinuousSynth(model)) {
Chris@313 134 ContinuousSynth *synth = makeSynthFor(model);
Chris@313 135 if (synth) {
Chris@313 136 QMutexLocker locker(&m_mutex);
Chris@313 137 m_continuousSynthMap[model] = synth;
Chris@313 138 return true;
Chris@313 139 }
Chris@43 140 }
Chris@307 141
Chris@43 142 return false;
Chris@43 143 }
Chris@43 144
Chris@43 145 void
Chris@309 146 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
Chris@43 147 {
Chris@108 148 const Model *model = dynamic_cast<const Model *>(playable);
Chris@108 149 if (!model) {
Chris@309 150 cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
Chris@108 151 << playable << " is not a supported model type"
Chris@293 152 << endl;
Chris@108 153 return;
Chris@108 154 }
Chris@108 155
Chris@307 156 if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
Chris@307 157
Chris@307 158 ClipMixer *mixer = makeClipMixerFor(model);
Chris@307 159 if (mixer) {
Chris@43 160 QMutexLocker locker(&m_mutex);
Chris@307 161 m_clipMixerMap[model] = mixer;
Chris@43 162 }
Chris@43 163 }
Chris@308 164
Chris@313 165 bool
Chris@313 166 AudioGenerator::usesClipMixer(const Model *model)
Chris@43 167 {
Chris@313 168 bool clip =
Chris@313 169 (qobject_cast<const SparseOneDimensionalModel *>(model) ||
Chris@313 170 qobject_cast<const NoteModel *>(model) ||
Chris@313 171 qobject_cast<const FlexiNoteModel *>(model));
Chris@313 172 return clip;
Chris@43 173 }
Chris@43 174
Chris@313 175 bool
Chris@349 176 AudioGenerator::wantsQuieterClips(const Model *model)
Chris@349 177 {
Chris@349 178 // basically, anything that usually has sustain or often has
Chris@349 179 // multiple notes sounding at once (i.e. note models) wants to use a
Chris@349 180 // quieter level than simple click tracks
Chris@349 181 bool does =
Chris@349 182 (qobject_cast<const NoteModel *>(model) ||
Chris@349 183 qobject_cast<const FlexiNoteModel *>(model));
Chris@349 184 return does;
Chris@349 185 }
Chris@349 186
Chris@349 187 bool
Chris@313 188 AudioGenerator::usesContinuousSynth(const Model *model)
Chris@43 189 {
Chris@313 190 bool cont =
Chris@313 191 (qobject_cast<const SparseTimeValueModel *>(model));
Chris@313 192 return cont;
Chris@313 193 }
Chris@313 194
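// Create a ClipMixer for a playable model, loading the clip named by its
// play parameters from the sample directory; returns 0 if the model has no
// clip or the clip cannot be loaded.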
Chris@307 195 ClipMixer *
Chris@307 196 AudioGenerator::makeClipMixerFor(const Model *model)
Chris@43 197 {
Chris@309 198 QString clipId;
Chris@43 199
Chris@108 200 const Playable *playable = model;
Chris@108 201 if (!playable || !playable->canPlay()) return 0;
Chris@108 202
Chris@43 203 PlayParameters *parameters =
Chris@108 204 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 205 if (parameters) {
Chris@309 206 clipId = parameters->getPlayClipId();
Chris@43 207 }
Chris@43 208
Chris@309 209 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
Chris@276 210
Chris@309 211 if (clipId == "") {
Chris@308 212 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
Chris@276 213 return 0;
Chris@276 214 }
Chris@43 215
Chris@308 216 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
Chris@308 217 m_sourceSampleRate,
Chris@308 218 m_processingBlockSize);
Chris@307 219
Chris@308 220 float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // clip reference: MIDI pitch 60 at A440
Chris@307 221
Chris@309 222 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
Chris@307 223
Chris@349 224 float level = wantsQuieterClips(model) ? 0.5f : 1.0f;
Chris@349 225 if (!mixer->loadClipData(clipPath, clipF0, level)) {
Chris@308 226 delete mixer;
Chris@43 227 return 0;
Chris@43 228 }
Chris@43 229
Chris@309 230 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
Chris@43 231
Chris@308 232 return mixer;
Chris@308 233 }
Chris@43 234
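// Create a ContinuousSynth matching the current channel count, sample rate,
// processing block size and wave type; returns 0 if the model is not playable.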
Chris@313 235 ContinuousSynth *
Chris@313 236 AudioGenerator::makeSynthFor(const Model *model)
Chris@313 237 {
Chris@313 238 const Playable *playable = model;
Chris@313 239 if (!playable || !playable->canPlay()) return 0;
Chris@313 240
Chris@313 241 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
Chris@313 242 m_sourceSampleRate,
rmb456@323 243 m_processingBlockSize,
rmb456@323 244 m_waveType);
Chris@313 245
Chris@313 246 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
Chris@313 247
Chris@313 248 return synth;
Chris@313 249 }
Chris@313 250
Chris@43 251 void
Chris@43 252 AudioGenerator::removeModel(Model *model)
Chris@43 253 {
Chris@43 254 SparseOneDimensionalModel *sodm =
Chris@43 255 dynamic_cast<SparseOneDimensionalModel *>(model);
Chris@43 256 if (!sodm) return; // nothing to do
Chris@43 257
Chris@43 258 QMutexLocker locker(&m_mutex);
Chris@43 259
Chris@308 260 if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
Chris@43 261
Chris@308 262 ClipMixer *mixer = m_clipMixerMap[sodm];
Chris@308 263 m_clipMixerMap.erase(sodm);
Chris@308 264 delete mixer;
Chris@43 265 }
Chris@43 266
Chris@43 267 void
Chris@43 268 AudioGenerator::clearModels()
Chris@43 269 {
Chris@43 270 QMutexLocker locker(&m_mutex);
Chris@308 271
Chris@308 272 while (!m_clipMixerMap.empty()) {
Chris@308 273 ClipMixer *mixer = m_clipMixerMap.begin()->second;
Chris@308 274 m_clipMixerMap.erase(m_clipMixerMap.begin());
Chris@308 275 delete mixer;
Chris@43 276 }
Chris@43 277 }
Chris@43 278
Chris@43 279 void
Chris@43 280 AudioGenerator::reset()
Chris@43 281 {
Chris@43 282 QMutexLocker locker(&m_mutex);
Chris@308 283
Chris@308 284 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@43 285 if (i->second) {
Chris@308 286 i->second->reset();
Chris@43 287 }
Chris@43 288 }
Chris@43 289
Chris@43 290 m_noteOffs.clear();
Chris@43 291 }
Chris@43 292
Chris@43 293 void
Chris@366 294 AudioGenerator::setTargetChannelCount(int targetChannelCount)
Chris@43 295 {
Chris@43 296 if (m_targetChannelCount == targetChannelCount) return;
Chris@43 297
Chris@233 298 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
Chris@43 299
Chris@43 300 QMutexLocker locker(&m_mutex);
Chris@43 301 m_targetChannelCount = targetChannelCount;
Chris@43 302
Chris@308 303 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
Chris@308 304 if (i->second) i->second->setChannelCount(targetChannelCount);
Chris@43 305 }
Chris@43 306 }
Chris@43 307
Chris@366 308 int
Chris@43 309 AudioGenerator::getBlockSize() const
Chris@43 310 {
Chris@305 311 return m_processingBlockSize;
Chris@43 312 }
Chris@43 313
Chris@43 314 void
Chris@43 315 AudioGenerator::setSoloModelSet(std::set<Model *> s)
Chris@43 316 {
Chris@43 317 QMutexLocker locker(&m_mutex);
Chris@43 318
Chris@43 319 m_soloModelSet = s;
Chris@43 320 m_soloing = true;
Chris@43 321 }
Chris@43 322
Chris@43 323 void
Chris@43 324 AudioGenerator::clearSoloModelSet()
Chris@43 325 {
Chris@43 326 QMutexLocker locker(&m_mutex);
Chris@43 327
Chris@43 328 m_soloModelSet.clear();
Chris@43 329 m_soloing = false;
Chris@43 330 }
Chris@43 331
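// Mix up to frameCount frames of the model into buffer (one array per target
// channel), respecting mute, solo, gain and pan, and dispatching to the dense,
// clip, or continuous-synth mixing routine as appropriate.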
Chris@366 332 int
Chris@366 333 AudioGenerator::mixModel(Model *model, int startFrame, int frameCount,
Chris@366 334 float **buffer, int fadeIn, int fadeOut)
Chris@43 335 {
Chris@43 336 if (m_sourceSampleRate == 0) {
Chris@293 337 cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
Chris@43 338 return frameCount;
Chris@43 339 }
Chris@43 340
Chris@43 341 QMutexLocker locker(&m_mutex);
Chris@43 342
Chris@108 343 Playable *playable = model;
Chris@108 344 if (!playable || !playable->canPlay()) return frameCount;
Chris@108 345
Chris@43 346 PlayParameters *parameters =
Chris@108 347 PlayParameterRepository::getInstance()->getPlayParameters(playable);
Chris@43 348 if (!parameters) return frameCount;
Chris@43 349
Chris@43 350 bool playing = !parameters->isPlayMuted();
Chris@43 351 if (!playing) {
Chris@43 352 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 353 cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
Chris@43 354 #endif
Chris@43 355 return frameCount;
Chris@43 356 }
Chris@43 357
Chris@43 358 if (m_soloing) {
Chris@43 359 if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
Chris@43 360 #ifdef DEBUG_AUDIO_GENERATOR
Chris@293 361 cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
Chris@43 362 #endif
Chris@43 363 return frameCount;
Chris@43 364 }
Chris@43 365 }
Chris@43 366
Chris@43 367 float gain = parameters->getPlayGain();
Chris@43 368 float pan = parameters->getPlayPan();
Chris@43 369
Chris@43 370 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
Chris@43 371 if (dtvm) {
Chris@43 372 return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
Chris@43 373 buffer, gain, pan, fadeIn, fadeOut);
Chris@43 374 }
Chris@43 375
Chris@313 376 if (usesClipMixer(model)) {
Chris@313 377 return mixClipModel(model, startFrame, frameCount,
Chris@313 378 buffer, gain, pan);
Chris@313 379 }
Chris@43 380
Chris@313 381 if (usesContinuousSynth(model)) {
Chris@313 382 return mixContinuousSynthModel(model, startFrame, frameCount,
Chris@313 383 buffer, gain, pan);
Chris@43 384 }
Chris@43 385
Chris@276 386 std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
Chris@276 387
Chris@43 388 return frameCount;
Chris@43 389 }
Chris@43 390
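// Read audio directly from a dense (waveform) model and add it into buffer,
// with gain, pan, and the requested fade-in/fade-out ramps applied.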
Chris@366 391 int
Chris@43 392 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
Chris@366 393 int startFrame, int frames,
Chris@43 394 float **buffer, float gain, float pan,
Chris@366 395 int fadeIn, int fadeOut)
Chris@43 396 {
Chris@80 397 static float **channelBuffer = 0;
Chris@366 398 static int channelBufSiz = 0;
Chris@366 399 static int channelBufCount = 0;
Chris@43 400
Chris@366 401 int totalFrames = frames + fadeIn/2 + fadeOut/2;
Chris@43 402
Chris@366 403 int modelChannels = dtvm->getChannelCount();
Chris@80 404
Chris@80 405 if (channelBufSiz < totalFrames || channelBufCount < modelChannels) {
Chris@80 406
Chris@366 407 for (int c = 0; c < channelBufCount; ++c) {
Chris@80 408 delete[] channelBuffer[c];
Chris@80 409 }
Chris@80 410
Chris@43 411 delete[] channelBuffer;
Chris@80 412 channelBuffer = new float *[modelChannels];
Chris@80 413
Chris@366 414 for (int c = 0; c < modelChannels; ++c) {
Chris@80 415 channelBuffer[c] = new float[totalFrames];
Chris@80 416 }
Chris@80 417
Chris@80 418 channelBufCount = modelChannels;
Chris@43 419 channelBufSiz = totalFrames;
Chris@43 420 }
Chris@80 421
Chris@366 422 int got = 0;
Chris@80 423
Chris@80 424 if (startFrame >= fadeIn/2) {
Chris@80 425 got = dtvm->getData(0, modelChannels - 1,
Chris@80 426 startFrame - fadeIn/2,
Chris@80 427 frames + fadeOut/2 + fadeIn/2,
Chris@80 428 channelBuffer);
Chris@80 429 } else {
Chris@366 430 int missing = fadeIn/2 - startFrame;
Chris@80 431
Chris@366 432 for (int c = 0; c < modelChannels; ++c) {
Chris@80 433 channelBuffer[c] += missing;
Chris@80 434 }
Chris@80 435
Chris@80 436 got = dtvm->getData(0, modelChannels - 1,
Chris@80 437 startFrame,
Chris@80 438 frames + fadeOut/2,
Chris@80 439 channelBuffer);
Chris@80 440
Chris@366 441 for (int c = 0; c < modelChannels; ++c) {
Chris@80 442 channelBuffer[c] -= missing;
Chris@80 443 }
Chris@80 444
Chris@80 445 got += missing;
Chris@80 446 }
Chris@43 447
Chris@366 448 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@43 449
Chris@366 450 int sourceChannel = (c % modelChannels);
Chris@43 451
Chris@233 452 // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
Chris@43 453
Chris@43 454 float channelGain = gain;
Chris@43 455 if (pan != 0.0) {
Chris@43 456 if (c == 0) {
Chris@43 457 if (pan > 0.0) channelGain *= 1.0 - pan;
Chris@43 458 } else {
Chris@43 459 if (pan < 0.0) channelGain *= pan + 1.0;
Chris@43 460 }
Chris@43 461 }
Chris@43 462
Chris@366 463 for (int i = 0; i < fadeIn/2; ++i) {
Chris@43 464 float *back = buffer[c];
Chris@43 465 back -= fadeIn/2;
Chris@80 466 back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn;
Chris@43 467 }
Chris@43 468
Chris@366 469 for (int i = 0; i < frames + fadeOut/2; ++i) {
Chris@43 470 float mult = channelGain;
Chris@43 471 if (i < fadeIn/2) {
Chris@43 472 mult = (mult * i) / fadeIn;
Chris@43 473 }
Chris@43 474 if (i > frames - fadeOut/2) {
Chris@43 475 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
Chris@43 476 }
Chris@80 477 float val = channelBuffer[sourceChannel][i];
Chris@80 478 if (i >= got) val = 0.f;
Chris@80 479 buffer[c][i] += mult * val;
Chris@43 480 }
Chris@43 481 }
Chris@43 482
Chris@43 483 return got;
Chris@43 484 }
Chris@43 485
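// Render a note-like model through its ClipMixer, one processing block at a
// time: gather the note-ons and pending note-offs that fall within each block
// and hand them to the mixer.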
Chris@366 486 int
Chris@313 487 AudioGenerator::mixClipModel(Model *model,
Chris@366 488 int startFrame, int frames,
Chris@313 489 float **buffer, float gain, float pan)
Chris@43 490 {
Chris@308 491 ClipMixer *clipMixer = m_clipMixerMap[model];
Chris@308 492 if (!clipMixer) return 0;
Chris@43 493
Chris@366 494 int blocks = frames / m_processingBlockSize;
Chris@43 495
Chris@313 496 //!!! todo: the below -- it matters
Chris@313 497
Chris@43 498 //!!! hang on -- the fact that the audio callback play source's
Chris@43 499 //buffer is a multiple of the plugin's buffer size doesn't mean
Chris@43 500 //that we always get called for a multiple of it here (because it
Chris@43 501 //also depends on the JACK block size). how should we ensure that
Chris@43 502 //all models write the same amount in to the mix, and that we
Chris@43 503 //always have a multiple of the plugin buffer size? I guess this
Chris@43 504 //class has to be queryable for the plugin buffer size & the
Chris@43 505 //callback play source has to use that as a multiple for all the
Chris@43 506 //calls to mixModel
Chris@43 507
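// Only whole processing blocks are rendered: any remainder of frames beyond a
// multiple of m_processingBlockSize is not mixed here.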
Chris@366 508 int got = blocks * m_processingBlockSize;
Chris@43 509
Chris@43 510 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 511 cout << "mixModel [clip]: frames " << frames
Chris@293 512 << ", blocks " << blocks << endl;
Chris@43 513 #endif
Chris@43 514
Chris@308 515 ClipMixer::NoteStart on;
Chris@308 516 ClipMixer::NoteEnd off;
Chris@43 517
Chris@275 518 NoteOffSet &noteOffs = m_noteOffs[model];
Chris@43 519
Chris@308 520 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@308 521
Chris@366 522 for (int i = 0; i < blocks; ++i) {
Chris@43 523
Chris@366 524 int reqStart = startFrame + i * m_processingBlockSize;
Chris@43 525
Chris@299 526 NoteList notes;
Chris@299 527 NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
Chris@299 528 if (exportable) {
Chris@366 529 notes = exportable->getNotesWithin(reqStart,
Chris@366 530 reqStart + m_processingBlockSize);
Chris@299 531 }
Chris@43 532
Chris@308 533 std::vector<ClipMixer::NoteStart> starts;
Chris@308 534 std::vector<ClipMixer::NoteEnd> ends;
Chris@43 535
Chris@275 536 for (NoteList::const_iterator ni = notes.begin();
Chris@275 537 ni != notes.end(); ++ni) {
Chris@43 538
Chris@366 539 int noteFrame = ni->start;
Chris@43 540
Chris@275 541 if (noteFrame < reqStart ||
Chris@305 542 noteFrame >= reqStart + m_processingBlockSize) continue;
Chris@43 543
Chris@43 544 while (noteOffs.begin() != noteOffs.end() &&
Chris@275 545 noteOffs.begin()->frame <= noteFrame) {
Chris@43 546
Chris@366 547 int eventFrame = noteOffs.begin()->frame;
Chris@308 548 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 549
Chris@308 550 off.frameOffset = eventFrame - reqStart;
Chris@308 551 off.frequency = noteOffs.begin()->frequency;
Chris@43 552
Chris@43 553 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 554 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 555 #endif
Chris@43 556
Chris@308 557 ends.push_back(off);
Chris@43 558 noteOffs.erase(noteOffs.begin());
Chris@43 559 }
Chris@43 560
Chris@308 561 on.frameOffset = noteFrame - reqStart;
Chris@308 562 on.frequency = ni->getFrequency();
Chris@308 563 on.level = float(ni->velocity) / 127.0f;
Chris@308 564 on.pan = pan;
Chris@43 565
Chris@43 566 #ifdef DEBUG_AUDIO_GENERATOR
Chris@346 567 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
Chris@43 568 #endif
Chris@43 569
Chris@308 570 starts.push_back(on);
Chris@275 571 noteOffs.insert
Chris@308 572 (NoteOff(on.frequency, noteFrame + ni->duration));
Chris@43 573 }
Chris@43 574
Chris@43 575 while (noteOffs.begin() != noteOffs.end() &&
Chris@308 576 noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
Chris@43 577
Chris@366 578 int eventFrame = noteOffs.begin()->frame;
Chris@308 579 if (eventFrame < reqStart) eventFrame = reqStart;
Chris@43 580
Chris@308 581 off.frameOffset = eventFrame - reqStart;
Chris@308 582 off.frequency = noteOffs.begin()->frequency;
Chris@43 583
Chris@43 584 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 585 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
Chris@43 586 #endif
Chris@43 587
Chris@308 588 ends.push_back(off);
Chris@308 589 noteOffs.erase(noteOffs.begin());
Chris@43 590 }
Chris@43 591
Chris@366 592 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@308 593 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@308 594 }
Chris@43 595
Chris@308 596 clipMixer->mix(bufferIndexes, gain, starts, ends);
Chris@308 597 }
Chris@43 598
Chris@308 599 delete[] bufferIndexes;
Chris@43 600
Chris@43 601 return got;
Chris@43 602 }
Chris@313 603
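// Render a sparse time/value model (with values in Hz) through its
// ContinuousSynth, one processing block at a time, feeding it the most recent
// frequency value found in each block.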
Chris@366 604 int
Chris@313 605 AudioGenerator::mixContinuousSynthModel(Model *model,
Chris@366 606 int startFrame,
Chris@366 607 int frames,
Chris@313 608 float **buffer,
Chris@313 609 float gain,
Chris@313 610 float pan)
Chris@313 611 {
Chris@313 612 ContinuousSynth *synth = m_continuousSynthMap[model];
Chris@313 613 if (!synth) return 0;
Chris@313 614
Chris@313 615 // only type we support here at the moment
Chris@313 616 SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
Chris@313 617 if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
Chris@313 618
Chris@366 619 int blocks = frames / m_processingBlockSize;
Chris@313 620
Chris@313 621 //!!! todo: see comment in mixClipModel
Chris@313 622
Chris@366 623 int got = blocks * m_processingBlockSize;
Chris@313 624
Chris@313 625 #ifdef DEBUG_AUDIO_GENERATOR
Chris@313 626 cout << "mixModel [synth]: frames " << frames
Chris@313 627 << ", blocks " << blocks << endl;
Chris@313 628 #endif
Chris@313 629
Chris@313 630 float **bufferIndexes = new float *[m_targetChannelCount];
Chris@313 631
Chris@366 632 for (int i = 0; i < blocks; ++i) {
Chris@313 633
Chris@366 634 int reqStart = startFrame + i * m_processingBlockSize;
Chris@313 635
Chris@366 636 for (int c = 0; c < m_targetChannelCount; ++c) {
Chris@313 637 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
Chris@313 638 }
Chris@313 639
Chris@313 640 SparseTimeValueModel::PointList points =
Chris@313 641 stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
Chris@313 642
Chris@313 643 // by default, repeat last frequency
Chris@313 644 float f0 = 0.f;
Chris@313 645
Chris@313 646 // go straight to the last freq that is genuinely in this range
Chris@313 647 for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
Chris@313 648 itr != points.begin(); ) {
Chris@313 649 --itr;
Chris@313 650 if (itr->frame >= reqStart &&
Chris@313 651 itr->frame < reqStart + m_processingBlockSize) {
Chris@313 652 f0 = itr->value;
Chris@313 653 break;
Chris@313 654 }
Chris@313 655 }
Chris@313 656
Chris@314 657 // if we found no such frequency and the next point is further
Chris@314 658 // away than twice the model resolution, go silent (same
Chris@314 659 // criterion TimeValueLayer uses for ending a discrete curve
Chris@314 660 // segment)
Chris@314 661 if (f0 == 0.f) {
Chris@314 662 SparseTimeValueModel::PointList nextPoints =
Chris@314 663 stvm->getNextPoints(reqStart + m_processingBlockSize);
Chris@314 664 if (nextPoints.empty() ||
Chris@314 665 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
Chris@314 666 f0 = -1.f;
Chris@314 667 }
Chris@314 668 }
Chris@314 669
Chris@315 670 // cerr << "f0 = " << f0 << endl;
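// By the conventions above, f0 > 0 requests that frequency, 0 means continue
// at the previous frequency, and -1 means fall silent.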
Chris@313 671
Chris@313 672 synth->mix(bufferIndexes,
Chris@313 673 gain,
Chris@313 674 pan,
Chris@313 675 f0);
Chris@313 676 }
Chris@313 677
Chris@313 678 delete[] bufferIndexes;
Chris@313 679
Chris@313 680 return got;
Chris@313 681 }
Chris@313 682