svapp: changeset 682:161063152ddd
Overhaul audio generator for ModelById
author | Chris Cannam |
---|---|
date | Tue, 02 Jul 2019 21:10:25 +0100 |
parents | c7406ebcd51c |
children | 0736beb8b852 |
files | audio/AudioCallbackPlaySource.cpp audio/AudioCallbackPlaySource.h audio/AudioGenerator.cpp audio/AudioGenerator.h |
diffstat | 4 files changed, 132 insertions(+), 127 deletions(-) |
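The recurring move in every hunk below is the same: raw `Model *` pointers are replaced by `ModelId` handles that are resolved on demand through the `ModelById` store. The following is a minimal sketch of that lookup pattern, using only the calls that actually appear in this diff (`ModelById::get`, `ModelById::getAs`, `getEndFrame`); the wrapper function, its name, and the exact include paths are assumptions for illustration.

```cpp
// Sketch of the ModelById access pattern this changeset adopts.
// ModelId, ModelById and the model classes come from svcore's
// data/model headers; endFrameOf() itself is a made-up example.
#include "data/model/Model.h"
#include "data/model/ReadOnlyWaveFileModel.h"

sv_frame_t endFrameOf(ModelId modelId)
{
    // get() returns a shared_ptr that is null if the model has been
    // released from the store, so every caller checks before use
    auto model = ModelById::get(modelId);
    if (!model) return 0;

    // getAs<T>() combines the lookup with a downcast to a specific
    // model type; it is also null if the id is absent or mismatched
    if (auto wave = ModelById::getAs<ReadOnlyWaveFileModel>(modelId)) {
        return wave->getEndFrame();
    }

    return model->getEndFrame();
}
```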
```diff
--- a/audio/AudioCallbackPlaySource.cpp	Mon Jun 24 16:14:12 2019 +0100
+++ b/audio/AudioCallbackPlaySource.cpp	Tue Jul 02 21:10:25 2019 +0100
@@ -151,15 +151,19 @@
 }
 
 void
-AudioCallbackPlaySource::addModel(Model *model)
+AudioCallbackPlaySource::addModel(ModelId modelId)
 {
-    if (m_models.find(model) != m_models.end()) return;
+    if (m_models.find(modelId) != m_models.end()) return;
 
-    bool willPlay = m_audioGenerator->addModel(model);
+    bool willPlay = m_audioGenerator->addModel(modelId);
+
+    auto model = ModelById::get(modelId);
+    if (!model) return;
 
     m_mutex.lock();
 
-    m_models.insert(model);
+    m_models.insert(modelId);
+
     if (model->getEndFrame() > m_lastModelEndFrame) {
         m_lastModelEndFrame = model->getEndFrame();
     }
@@ -167,7 +171,7 @@
     bool buffersIncreased = false, srChanged = false;
 
     int modelChannels = 1;
-    ReadOnlyWaveFileModel *rowfm = qobject_cast<ReadOnlyWaveFileModel *>(model);
+    auto rowfm = std::dynamic_pointer_cast<ReadOnlyWaveFileModel>(model);
     if (rowfm) modelChannels = rowfm->getChannelCount();
     if (modelChannels > m_sourceChannelCount) {
         m_sourceChannelCount = modelChannels;
@@ -194,20 +198,19 @@
 
             bool conflicting = false;
 
-            for (std::set<Model *>::const_iterator i = m_models.begin();
-                 i != m_models.end(); ++i) {
+            for (ModelId otherId: m_models) {
                 // Only read-only wave file models should be
                 // considered conflicting -- writable wave file models
                 // are derived and we shouldn't take their rates into
                 // account. Also, don't give any particular weight to
                 // a file that's already playing at the wrong rate
                 // anyway
-                ReadOnlyWaveFileModel *other =
-                    qobject_cast<ReadOnlyWaveFileModel *>(*i);
-                if (other && other != rowfm &&
+                if (otherId == modelId) continue;
+                auto other = ModelById::getAs<ReadOnlyWaveFileModel>(otherId);
+                if (other &&
                     other->getSampleRate() != model->getSampleRate() &&
                     other->getSampleRate() == m_sourceSampleRate) {
-                    SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl;
+                    SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << otherId << " found" << endl;
                     conflicting = true;
                     break;
                 }
@@ -291,7 +294,7 @@
     SVDEBUG << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s)" << endl;
 #endif
 
-    connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
+    connect(model.get(), SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
             this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
@@ -318,37 +321,31 @@
 }
 
 void
-AudioCallbackPlaySource::removeModel(Model *model)
+AudioCallbackPlaySource::removeModel(ModelId modelId)
 {
+    auto model = ModelById::get(modelId);
+    if (!model) return;
+
     m_mutex.lock();
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl;
+    cout << "AudioCallbackPlaySource::removeModel(" << modelId << ")" << endl;
 #endif
 
-    disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
+    disconnect(model.get(), SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
                this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
 
-    m_models.erase(model);
-
-    // I don't think we have to do this any more: if a new model is
-    // loaded at a different rate, we'll hit the non-conflicting path
-    // in addModel and the rate will be updated without problems; but
-    // if a new model is loaded at the rate that we were using for the
-    // last one, then we save work by not having reset this here
-    //
-//    if (m_models.empty()) {
-//        m_sourceSampleRate = 0;
-//    }
+    m_models.erase(modelId);
 
     sv_frame_t lastEnd = 0;
-    for (std::set<Model *>::const_iterator i = m_models.begin();
-         i != m_models.end(); ++i) {
+    for (ModelId otherId: m_models) {
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-        cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl;
+        cout << "AudioCallbackPlaySource::removeModel(" << modelId << "): checking end frame on model " << otherId << endl;
 #endif
-        if ((*i)->getEndFrame() > lastEnd) {
-            lastEnd = (*i)->getEndFrame();
+        if (auto other = ModelById::get(otherId)) {
+            if (other->getEndFrame() > lastEnd) {
+                lastEnd = other->getEndFrame();
+            }
         }
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
         cout << "(done, lastEnd now " << lastEnd << ")" << endl;
@@ -356,7 +353,7 @@
     }
     m_lastModelEndFrame = lastEnd;
 
-    m_audioGenerator->removeModel(model);
+    m_audioGenerator->removeModel(modelId);
 
     if (m_models.empty()) {
         m_sourceSampleRate = 0;
@@ -1014,7 +1011,7 @@
 }
 
 void
-AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
+AudioCallbackPlaySource::setSoloModelSet(std::set<ModelId> s)
 {
     m_audioGenerator->setSoloModelSet(s);
     clearRingBuffers();
@@ -1664,7 +1661,7 @@
         }
     }
 
-    for (std::set<Model *>::iterator mi = m_models.begin();
+    for (std::set<ModelId>::iterator mi = m_models.begin();
          mi != m_models.end(); ++mi) {
 
         (void) m_audioGenerator->mixModel(*mi, chunkStart,
```
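For orientation, a hypothetical caller of the reworked interface above; `source` and `waveModelId` are placeholder names, not identifiers from the changeset, and only the `addModel`/`removeModel` signatures declared in the header below are assumed.

```cpp
// Hypothetical usage of the ModelId-based playback interface.
#include "audio/AudioCallbackPlaySource.h"

void attachForPlayback(AudioCallbackPlaySource &source, ModelId waveModelId)
{
    // addModel() is a no-op if the id is already registered or if it
    // no longer resolves through ModelById
    source.addModel(waveModelId);
}

void detachFromPlayback(AudioCallbackPlaySource &source, ModelId waveModelId)
{
    // removeModel() drops the id from the playback set and recomputes
    // the last playable end frame from the remaining models
    source.removeModel(waveModelId);
}
```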
```diff
--- a/audio/AudioCallbackPlaySource.h	Mon Jun 24 16:14:12 2019 +0100
+++ b/audio/AudioCallbackPlaySource.h	Tue Jul 02 21:10:25 2019 +0100
@@ -29,6 +29,7 @@
 
 #include "base/Thread.h"
 #include "base/RealTime.h"
+#include "data/model/Model.h"
 
 #include <samplerate.h>
 
@@ -73,12 +74,12 @@
      * models. The models must match in sample rate, but they don't
      * have to have identical numbers of channels.
      */
-    virtual void addModel(Model *model);
+    virtual void addModel(ModelId model);
 
     /**
      * Remove a model.
      */
-    virtual void removeModel(Model *model);
+    virtual void removeModel(ModelId model);
 
     /**
      * Remove all models. (Silence will ensue.)
@@ -292,7 +293,7 @@
     /**
      * Specify that only the given set of models should be played.
     */
-    void setSoloModelSet(std::set<Model *>s);
+    void setSoloModelSet(std::set<ModelId>s);
 
     /**
      * Specify that all models should be played as normal (if not
@@ -344,7 +345,7 @@
         }
     };
 
-    std::set<Model *> m_models;
+    std::set<ModelId> m_models;
     RingBufferVector *m_readBuffers;
     RingBufferVector *m_writeBuffers;
     sv_frame_t m_readBufferFill;
```
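The solo interface follows the same id-based pattern. A small usage sketch under the same assumptions (the caller and the id set are invented for illustration; `setSoloModelSet` is declared above):

```cpp
// Hypothetical "solo" call: restrict playback to an explicit set of
// model ids, as declared in AudioCallbackPlaySource.h.
#include <set>
#include "audio/AudioCallbackPlaySource.h"

void soloOnly(AudioCallbackPlaySource &source, const std::set<ModelId> &chosen)
{
    source.setSoloModelSet(chosen);   // everything not in "chosen" is muted
}
```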
```diff
--- a/audio/AudioGenerator.cpp	Mon Jun 24 16:14:12 2019 +0100
+++ b/audio/AudioGenerator.cpp	Tue Jul 02 21:10:25 2019 +0100
@@ -56,9 +56,9 @@
     initialiseSampleDir();
 
     connect(PlayParameterRepository::getInstance(),
-            SIGNAL(playClipIdChanged(const Playable *, QString)),
+            SIGNAL(playClipIdChanged(int, QString)),
             this,
-            SLOT(playClipIdChanged(const Playable *, QString)));
+            SLOT(playClipIdChanged(int, QString)));
 }
 
 AudioGenerator::~AudioGenerator()
@@ -111,16 +111,19 @@
 }
 
 bool
-AudioGenerator::addModel(Model *model)
+AudioGenerator::addModel(ModelId modelId)
 {
+    auto model = ModelById::get(modelId);
+    if (!model) return false;
+
+    if (!model->canPlay()) return false;
+
     if (m_sourceSampleRate == 0) {
 
         m_sourceSampleRate = model->getSampleRate();
 
     } else {
 
-        DenseTimeValueModel *dtvm =
-            dynamic_cast<DenseTimeValueModel *>(model);
+        auto dtvm = std::dynamic_pointer_cast<DenseTimeValueModel>(model);
 
         if (dtvm) {
             m_sourceSampleRate = model->getSampleRate();
@@ -128,28 +131,31 @@
         }
     }
 
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return 0;
+    PlayParameters *parameters =
+        PlayParameterRepository::getInstance()->getPlayParameters
+        (modelId.untyped);
 
-    PlayParameters *parameters =
-        PlayParameterRepository::getInstance()->getPlayParameters(playable);
+    if (!parameters) {
+        SVCERR << "WARNING: Model with canPlay true is not known to PlayParameterRepository" << endl;
+        return false;
+    }
 
     bool willPlay = !parameters->isPlayMuted();
 
-    if (usesClipMixer(model)) {
-        ClipMixer *mixer = makeClipMixerFor(model);
+    if (usesClipMixer(modelId)) {
+        ClipMixer *mixer = makeClipMixerFor(modelId);
         if (mixer) {
             QMutexLocker locker(&m_mutex);
-            m_clipMixerMap[model->getId()] = mixer;
+            m_clipMixerMap[modelId] = mixer;
             return willPlay;
         }
     }
 
-    if (usesContinuousSynth(model)) {
-        ContinuousSynth *synth = makeSynthFor(model);
+    if (usesContinuousSynth(modelId)) {
+        ContinuousSynth *synth = makeSynthFor(modelId);
         if (synth) {
             QMutexLocker locker(&m_mutex);
-            m_continuousSynthMap[model->getId()] = synth;
+            m_continuousSynthMap[modelId] = synth;
             return willPlay;
         }
     }
@@ -158,8 +164,9 @@
 }
 
 void
-AudioGenerator::playClipIdChanged(const Playable *playable, QString)
+AudioGenerator::playClipIdChanged(int playableId, QString)
 {
+    /*!!!
     const Model *model = dynamic_cast<const Model *>(playable);
     if (!model) {
         cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
@@ -167,65 +174,67 @@
              << endl;
         return;
     }
-
-    if (m_clipMixerMap.find(model->getId()) == m_clipMixerMap.end()) {
+    */
+    ModelId modelId;
+    modelId.untyped = playableId;
+
+    if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
         return;
     }
 
-    ClipMixer *mixer = makeClipMixerFor(model);
+    ClipMixer *mixer = makeClipMixerFor(modelId);
     if (mixer) {
         QMutexLocker locker(&m_mutex);
-        m_clipMixerMap[model->getId()] = mixer;
+        ClipMixer *oldMixer = m_clipMixerMap[modelId];
+        m_clipMixerMap[modelId] = mixer;
+        delete oldMixer;
     }
 }
 
 bool
-AudioGenerator::usesClipMixer(const Model *model)
+AudioGenerator::usesClipMixer(ModelId modelId)
 {
     bool clip =
-        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
-         qobject_cast<const NoteModel *>(model));
+        (ModelById::isa<SparseOneDimensionalModel>(modelId) ||
+         ModelById::isa<NoteModel>(modelId));
     return clip;
 }
 
 bool
-AudioGenerator::wantsQuieterClips(const Model *model)
+AudioGenerator::wantsQuieterClips(ModelId modelId)
 {
     // basically, anything that usually has sustain (like notes) or
     // often has multiple sounds at once (like notes) wants to use a
     // quieter level than simple click tracks
-    bool does = (qobject_cast<const NoteModel *>(model));
+    bool does = (ModelById::isa<NoteModel>(modelId));
     return does;
 }
 
 bool
-AudioGenerator::usesContinuousSynth(const Model *model)
+AudioGenerator::usesContinuousSynth(ModelId modelId)
 {
-    bool cont =
-        (qobject_cast<const SparseTimeValueModel *>(model));
+    bool cont = (ModelById::isa<SparseTimeValueModel>(modelId));
     return cont;
 }
 
 ClipMixer *
-AudioGenerator::makeClipMixerFor(const Model *model)
+AudioGenerator::makeClipMixerFor(ModelId modelId)
 {
     QString clipId;
 
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return nullptr;
-
     PlayParameters *parameters =
-        PlayParameterRepository::getInstance()->getPlayParameters(playable);
+        PlayParameterRepository::getInstance()->getPlayParameters
+        (modelId.untyped);
     if (parameters) {
         clipId = parameters->getPlayClipId();
     }
 
 #ifdef DEBUG_AUDIO_GENERATOR
-    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
+    std::cerr << "AudioGenerator::makeClipMixerFor(" << modelId << "): sample id = " << clipId << std::endl;
 #endif
 
     if (clipId == "") {
-        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
+        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << modelId << "): no sample, skipping" << endl;
         return nullptr;
     }
 
@@ -237,7 +246,7 @@
 
     QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
 
-    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
+    double level = wantsQuieterClips(modelId) ? 0.5 : 1.0;
     if (!mixer->loadClipData(clipPath, clipF0, level)) {
         delete mixer;
         return nullptr;
@@ -251,11 +260,8 @@
 }
 
 ContinuousSynth *
-AudioGenerator::makeSynthFor(const Model *model)
+AudioGenerator::makeSynthFor(ModelId)
 {
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return nullptr;
-
     ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
                                                  m_sourceSampleRate,
                                                  m_processingBlockSize,
@@ -269,20 +275,16 @@
 }
 
 void
-AudioGenerator::removeModel(Model *model)
+AudioGenerator::removeModel(ModelId modelId)
 {
-    SparseOneDimensionalModel *sodm =
-        dynamic_cast<SparseOneDimensionalModel *>(model);
-    if (!sodm) return; // nothing to do
-
     QMutexLocker locker(&m_mutex);
 
-    if (m_clipMixerMap.find(sodm->getId()) == m_clipMixerMap.end()) {
+    if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
         return;
     }
 
-    ClipMixer *mixer = m_clipMixerMap[sodm->getId()];
-    m_clipMixerMap.erase(sodm->getId());
+    ClipMixer *mixer = m_clipMixerMap[modelId];
+    m_clipMixerMap.erase(modelId);
 
     delete mixer;
 }
@@ -339,7 +341,7 @@
 }
 
 void
-AudioGenerator::setSoloModelSet(std::set<Model *> s)
+AudioGenerator::setSoloModelSet(std::set<ModelId> s)
 {
     QMutexLocker locker(&m_mutex);
 
@@ -357,7 +359,7 @@
 }
 
 sv_frame_t
-AudioGenerator::mixModel(Model *model,
+AudioGenerator::mixModel(ModelId modelId,
                          sv_frame_t startFrame, sv_frame_t frameCount,
                          float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
 {
@@ -369,25 +371,26 @@
 
     QMutexLocker locker(&m_mutex);
 
-    Playable *playable = model;
-    if (!playable || !playable->canPlay()) return frameCount;
+    auto model = ModelById::get(modelId);
+    if (!model || !model->canPlay()) return frameCount;
 
     PlayParameters *parameters =
-        PlayParameterRepository::getInstance()->getPlayParameters(playable);
+        PlayParameterRepository::getInstance()->getPlayParameters
+        (modelId.untyped);
     if (!parameters) return frameCount;
 
     bool playing = !parameters->isPlayMuted();
     if (!playing) {
 #ifdef DEBUG_AUDIO_GENERATOR
-        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
+        cout << "AudioGenerator::mixModel(" << modelId << "): muted" << endl;
 #endif
         return frameCount;
     }
 
     if (m_soloing) {
-        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
+        if (m_soloModelSet.find(modelId) == m_soloModelSet.end()) {
 #ifdef DEBUG_AUDIO_GENERATOR
-            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
+            cout << "AudioGenerator::mixModel(" << modelId << "): not one of the solo'd models" << endl;
 #endif
             return frameCount;
         }
@@ -396,35 +399,37 @@
     float gain = parameters->getPlayGain();
     float pan = parameters->getPlayPan();
 
-    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
-    if (dtvm) {
-        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
+    if (std::dynamic_pointer_cast<DenseTimeValueModel>(model)) {
+        return mixDenseTimeValueModel(modelId, startFrame, frameCount,
                                       buffer, gain, pan, fadeIn, fadeOut);
     }
 
-    if (usesClipMixer(model)) {
-        return mixClipModel(model, startFrame, frameCount,
+    if (usesClipMixer(modelId)) {
+        return mixClipModel(modelId, startFrame, frameCount,
                             buffer, gain, pan);
    }
 
-    if (usesContinuousSynth(model)) {
-        return mixContinuousSynthModel(model, startFrame, frameCount,
+    if (usesContinuousSynth(modelId)) {
+        return mixContinuousSynthModel(modelId, startFrame, frameCount,
                                        buffer, gain, pan);
    }
 
-    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
+    std::cerr <<
+        "AudioGenerator::mixModel: WARNING: Model " << modelId << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
 
     return frameCount;
 }
 
 sv_frame_t
-AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
+AudioGenerator::mixDenseTimeValueModel(ModelId modelId,
                                        sv_frame_t startFrame, sv_frame_t frames,
                                        float **buffer, float gain, float pan,
                                        sv_frame_t fadeIn, sv_frame_t fadeOut)
 {
     sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
 
+    auto dtvm = ModelById::getAs<DenseTimeValueModel>(modelId);
+    if (!dtvm) return 0;
+
     int modelChannels = dtvm->getChannelCount();
 
     if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
@@ -519,13 +524,15 @@
 }
 
 sv_frame_t
-AudioGenerator::mixClipModel(Model *model,
+AudioGenerator::mixClipModel(ModelId modelId,
                              sv_frame_t startFrame, sv_frame_t frames,
                              float **buffer, float gain, float pan)
 {
-    ClipMixer *clipMixer = m_clipMixerMap[model->getId()];
+    ClipMixer *clipMixer = m_clipMixerMap[modelId];
     if (!clipMixer) return 0;
 
+    auto exportable = ModelById::getAs<NoteExportable>(modelId);
+
     int blocks = int(frames / m_processingBlockSize);
 
     //!!! todo: the below -- it matters
@@ -551,7 +558,7 @@
 
     ClipMixer::NoteStart on;
     ClipMixer::NoteEnd off;
 
-    NoteOffSet &noteOffs = m_noteOffs[model->getId()];
+    NoteOffSet &noteOffs = m_noteOffs[modelId];
 
     float **bufferIndexes = new float *[m_targetChannelCount];
 
@@ -562,7 +569,6 @@
         sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
 
         NoteList notes;
-        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
         if (exportable) {
             notes = exportable->getNotesStartingWithin(reqStart,
                                                        m_processingBlockSize);
@@ -677,18 +683,19 @@
 }
 
 sv_frame_t
-AudioGenerator::mixContinuousSynthModel(Model *model,
+AudioGenerator::mixContinuousSynthModel(ModelId modelId,
                                         sv_frame_t startFrame,
                                         sv_frame_t frames,
                                         float **buffer, float gain, float pan)
 {
-    ContinuousSynth *synth = m_continuousSynthMap[model->getId()];
+    ContinuousSynth *synth = m_continuousSynthMap[modelId];
     if (!synth) return 0;
 
     // only type we support here at the moment
-    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
+    auto stvm = ModelById::getAs<SparseTimeValueModel>(modelId);
+    if (!stvm) return 0;
    if (stvm->getScaleUnits() != "Hz") return 0;
 
     int blocks = int(frames / m_processingBlockSize);
```
```diff
--- a/audio/AudioGenerator.h	Mon Jun 24 16:14:12 2019 +0100
+++ b/audio/AudioGenerator.h	Tue Jul 02 21:10:25 2019 +0100
@@ -48,12 +48,12 @@
      * played. The model will be added regardless of the return
      * value.
      */
-    virtual bool addModel(Model *model);
+    virtual bool addModel(ModelId model);
 
     /**
      * Remove a model.
     */
-    virtual void removeModel(Model *model);
+    virtual void removeModel(ModelId model);
 
     /**
      * Remove all models.
@@ -81,7 +81,7 @@
     /**
      * Mix a single model into an output buffer.
     */
-    virtual sv_frame_t mixModel(Model *model,
+    virtual sv_frame_t mixModel(ModelId model,
                                 sv_frame_t startFrame,
                                 sv_frame_t frameCount,
                                 float **buffer,
@@ -91,7 +91,7 @@
     /**
      * Specify that only the given set of models should be played.
     */
-    virtual void setSoloModelSet(std::set<Model *>s);
+    virtual void setSoloModelSet(std::set<ModelId>s);
 
     /**
      * Specify that all models should be played as normal (if not
@@ -100,7 +100,7 @@
     virtual void clearSoloModelSet();
 
 protected slots:
-    void playClipIdChanged(const Playable *, QString);
+    void playClipIdChanged(int playableId, QString);
 
 protected:
     sv_samplerate_t m_sourceSampleRate;
@@ -108,7 +108,7 @@
     int m_waveType;
 
     bool m_soloing;
-    std::set<Model *> m_soloModelSet;
+    std::set<ModelId> m_soloModelSet;
 
     struct NoteOff {
 
@@ -140,12 +140,12 @@
 
     };
 
-    typedef std::map<const ModelId, ClipMixer *> ClipMixerMap;
+    typedef std::map<ModelId, ClipMixer *> ClipMixerMap;
 
     typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet;
-    typedef std::map<const ModelId, NoteOffSet> NoteOffMap;
+    typedef std::map<ModelId, NoteOffSet> NoteOffMap;
 
-    typedef std::map<const ModelId, ContinuousSynth *> ContinuousSynthMap;
+    typedef std::map<ModelId, ContinuousSynth *> ContinuousSynthMap;
 
     QMutex m_mutex;
 
@@ -155,25 +155,25 @@
 
     ContinuousSynthMap m_continuousSynthMap;
 
-    bool usesClipMixer(const Model *);
-    bool wantsQuieterClips(const Model *);
-    bool usesContinuousSynth(const Model *);
+    bool usesClipMixer(ModelId);
+    bool wantsQuieterClips(ModelId);
+    bool usesContinuousSynth(ModelId);
 
-    ClipMixer *makeClipMixerFor(const Model *model);
-    ContinuousSynth *makeSynthFor(const Model *model);
+    ClipMixer *makeClipMixerFor(ModelId model);
+    ContinuousSynth *makeSynthFor(ModelId model);
 
     static void initialiseSampleDir();
 
     virtual sv_frame_t mixDenseTimeValueModel
-    (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount,
+    (ModelId model, sv_frame_t startFrame, sv_frame_t frameCount,
      float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut);
 
     virtual sv_frame_t mixClipModel
-    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+    (ModelId model, sv_frame_t startFrame, sv_frame_t frameCount,
      float **buffer, float gain, float pan);
 
     virtual sv_frame_t mixContinuousSynthModel
-    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+    (ModelId model, sv_frame_t startFrame, sv_frame_t frameCount,
      float **buffer, float gain, float pan);
 
     static const sv_frame_t m_processingBlockSize;
```
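After this change the per-model bookkeeping maps are keyed by `ModelId` directly, as in the `ClipMixerMap` typedef above. A small sketch of that container pattern, assuming `ModelId` is less-than comparable (it has to be, to serve as a `std::map` key); the lookup helper is invented for illustration and is not part of the changeset.

```cpp
// Per-model mixer bookkeeping keyed directly by ModelId.
#include <map>
#include "data/model/Model.h"

class ClipMixer;   // real definition lives in the audio/ClipMixer header

typedef std::map<ModelId, ClipMixer *> ClipMixerMap;

ClipMixer *findMixer(const ClipMixerMap &mixers, ModelId modelId)
{
    // find() avoids inserting a null entry, unlike operator[]
    auto it = mixers.find(modelId);
    return (it == mixers.end()) ? nullptr : it->second;
}
```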