/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioGenerator.h"

#include "base/TempDirectory.h"
#include "base/PlayParameters.h"
#include "base/PlayParameterRepository.h"
#include "base/Pitch.h"
#include "base/Exceptions.h"

#include "data/model/NoteModel.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/SparseOneDimensionalModel.h"

#include "plugin/RealTimePluginFactory.h"
#include "plugin/RealTimePluginInstance.h"
#include "plugin/PluginIdentifier.h"
#include "plugin/PluginXml.h"
#include "plugin/api/alsa/seq_event.h"

#include <iostream>
#include <cmath>

#include <QDir>
#include <QFile>

const size_t
AudioGenerator::m_pluginBlockSize = 2048;

QString
AudioGenerator::m_sampleDir = "";

//#define DEBUG_AUDIO_GENERATOR 1

AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_soloing(false)
{
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginIdChanged(const Model *, QString)),
            this,
            SLOT(playPluginIdChanged(const Model *, QString)));

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playPluginConfigurationChanged(const Model *, QString)),
            this,
            SLOT(playPluginConfigurationChanged(const Model *, QString)));
}

AudioGenerator::~AudioGenerator()
{
}

bool
AudioGenerator::canPlay(const Model *model)
{
    if (dynamic_cast<const DenseTimeValueModel *>(model) ||
        dynamic_cast<const SparseOneDimensionalModel *>(model) ||
        dynamic_cast<const NoteModel *>(model)) {
        return true;
    } else {
        return false;
    }
}

bool
AudioGenerator::addModel(Model *model)
{
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        m_synthMap[model] = plugin;
        return true;
    }

    return false;
}

void
AudioGenerator::playPluginIdChanged(const Model *model, QString)
{
    if (m_synthMap.find(model) == m_synthMap.end()) return;

    RealTimePluginInstance *plugin = loadPluginFor(model);
    if (plugin) {
        QMutexLocker locker(&m_mutex);
        delete m_synthMap[model];
        m_synthMap[model] = plugin;
    }
}
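// Slot: a model's playback plugin configuration has changed in the
// repository; push the new XML-encoded parameters into the plugin
// instance we already hold for that model (if any).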
void
AudioGenerator::playPluginConfigurationChanged(const Model *model,
                                               QString configurationXml)
{
//    std::cerr << "AudioGenerator::playPluginConfigurationChanged" << std::endl;

    if (m_synthMap.find(model) == m_synthMap.end()) {
        std::cerr << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << std::endl;
        return;
    }

    RealTimePluginInstance *plugin = m_synthMap[model];
    if (plugin) {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }
}

QString
AudioGenerator::getDefaultPlayPluginId(const Model *model)
{
    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        return QString("dssi:%1:sample_player").
            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
    }

    return "";
}

QString
AudioGenerator::getDefaultPlayPluginConfiguration(const Model *model)
{
    QString program = "";

    const SparseOneDimensionalModel *sodm =
        dynamic_cast<const SparseOneDimensionalModel *>(model);
    if (sodm) {
        program = "tap";
    }

    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
    if (nm) {
        program = "piano";
    }

    if (program == "") return "";

    return
        QString("<plugin configuration=\"%1\" program=\"%2\"/>")
        .arg(XmlExportable::encodeEntities
             (QString("sampledir=%1")
              .arg(PluginXml::encodeConfigurationChars(getSampleDir()))))
        .arg(XmlExportable::encodeEntities(program));
}

QString
AudioGenerator::getSampleDir()
{
    if (m_sampleDir != "") return m_sampleDir;

    try {
        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
    } catch (DirectoryCreationFailed f) {
        std::cerr << "WARNING: AudioGenerator::getSampleDir: Failed to create "
                  << "temporary sample directory" << std::endl;
        m_sampleDir = "";
        return "";
    }

    QDir sampleResourceDir(":/samples", "*.wav");

    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {

        QString fileName(sampleResourceDir[i]);
        QFile file(sampleResourceDir.filePath(fileName));

        if (!file.copy(QDir(m_sampleDir).filePath(fileName))) {
            std::cerr << "WARNING: AudioGenerator::getSampleDir: "
                      << "Unable to copy " << fileName.toStdString()
                      << " into temporary directory \""
                      << m_sampleDir.toStdString() << "\"" << std::endl;
        }
    }

    return m_sampleDir;
}

void
AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
{
    plugin->configure("sampledir", getSampleDir().toStdString());
}
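// Choose and instantiate a playback synth plugin for the given model.
// The plugin id and configuration XML are taken from the model's
// PlayParameters if set there, otherwise from the defaults above; the
// values actually used are written back to the PlayParameters.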
RealTimePluginInstance *
AudioGenerator::loadPluginFor(const Model *model)
{
    QString pluginId, configurationXml;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (parameters) {
        pluginId = parameters->getPlayPluginId();
        configurationXml = parameters->getPlayPluginConfiguration();
    }

    if (pluginId == "") {
        pluginId = getDefaultPlayPluginId(model);
        configurationXml = getDefaultPlayPluginConfiguration(model);
    }

    if (pluginId == "") return 0;

    RealTimePluginInstance *plugin = loadPlugin(pluginId, "");
    if (!plugin) return 0;

    if (configurationXml != "") {
        PluginXml(plugin).setParametersFromXml(configurationXml);
    }

    if (parameters) {
        parameters->setPlayPluginId(pluginId);
        parameters->setPlayPluginConfiguration(configurationXml);
    }

    return plugin;
}
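// Instantiate the plugin identified by pluginId through the
// RealTimePluginFactory, point it at our sample directory, reset its
// parameters to their defaults, and select the requested program
// (after first selecting the plugin's own default program, if any).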
RealTimePluginInstance *
AudioGenerator::loadPlugin(QString pluginId, QString program)
{
    RealTimePluginFactory *factory =
        RealTimePluginFactory::instanceFor(pluginId);

    if (!factory) {
        std::cerr << "Failed to get plugin factory" << std::endl;
        return 0;
    }

    RealTimePluginInstance *instance =
        factory->instantiatePlugin
        (pluginId, 0, 0, m_sourceSampleRate, m_pluginBlockSize, m_targetChannelCount);

    if (!instance) {
        std::cerr << "Failed to instantiate plugin " << pluginId.toStdString() << std::endl;
        return 0;
    }

    setSampleDir(instance);

    for (unsigned int i = 0; i < instance->getParameterCount(); ++i) {
        instance->setParameterValue(i, instance->getParameterDefault(i));
    }
    std::string defaultProgram = instance->getProgram(0, 0);
    if (defaultProgram != "") {
        // std::cerr << "first selecting default program " << defaultProgram << std::endl;
        instance->selectProgram(defaultProgram);
    }
    if (program != "") {
        // std::cerr << "now selecting desired program " << program.toStdString() << std::endl;
        instance->selectProgram(program.toStdString());
    }
    instance->setIdealChannelCount(m_targetChannelCount); // reset!

    return instance;
}

void
AudioGenerator::removeModel(Model *model)
{
    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (!sodm) return; // nothing to do

    QMutexLocker locker(&m_mutex);

    if (m_synthMap.find(sodm) == m_synthMap.end()) return;

    RealTimePluginInstance *instance = m_synthMap[sodm];
    m_synthMap.erase(sodm);
    delete instance;
}

void
AudioGenerator::clearModels()
{
    QMutexLocker locker(&m_mutex);
    while (!m_synthMap.empty()) {
        RealTimePluginInstance *instance = m_synthMap.begin()->second;
        m_synthMap.erase(m_synthMap.begin());
        delete instance;
    }
}

void
AudioGenerator::reset()
{
    QMutexLocker locker(&m_mutex);
    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) {
            i->second->silence();
            i->second->discardEvents();
        }
    }

    m_noteOffs.clear();
}

void
AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
{
    if (m_targetChannelCount == targetChannelCount) return;

//    std::cerr << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << std::endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
        if (i->second) i->second->setIdealChannelCount(targetChannelCount);
    }
}

size_t
AudioGenerator::getBlockSize() const
{
    return m_pluginBlockSize;
}

void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}

void
AudioGenerator::clearSoloModelSet()
{
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}
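// Mix frameCount frames of the given model into buffer (one float
// array per target channel), starting at startFrame.  Muted models,
// and models outside the solo set while soloing is active, are
// skipped; otherwise we dispatch to the type-specific mixer below
// with the model's gain, pan and fade settings, and return the number
// of frames it reports having produced.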
size_t
AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
                         float **buffer, size_t fadeIn, size_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        std::cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << std::endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(model);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        std::cout << "AudioGenerator::mixModel(" << model << "): muted" << std::endl;
#endif
        return frameCount;
    }

    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << std::endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    SparseOneDimensionalModel *sodm =
        dynamic_cast<SparseOneDimensionalModel *>(model);
    if (sodm) {
        return mixSparseOneDimensionalModel(sodm, startFrame, frameCount,
                                            buffer, gain, pan, fadeIn, fadeOut);
    }

    NoteModel *nm = dynamic_cast<NoteModel *>(model);
    if (nm) {
        return mixNoteModel(nm, startFrame, frameCount,
                            buffer, gain, pan, fadeIn, fadeOut);
    }

    return frameCount;
}
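// Mix a dense (audio sample) model by reading its data directly,
// applying per-channel gain and pan, and ramping over the fadeIn and
// fadeOut regions that straddle the start and end of the block.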
size_t
AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
                                       size_t startFrame, size_t frames,
                                       float **buffer, float gain, float pan,
                                       size_t fadeIn, size_t fadeOut)
{
    static float *channelBuffer = 0;
    static size_t channelBufSiz = 0;

    size_t totalFrames = frames + fadeIn/2 + fadeOut/2;

    if (channelBufSiz < totalFrames) {
        delete[] channelBuffer;
        channelBuffer = new float[totalFrames];
        channelBufSiz = totalFrames;
    }

    size_t got = 0;
    size_t prevChannel = 999;

    for (size_t c = 0; c < m_targetChannelCount; ++c) {

        size_t sourceChannel = (c % dtvm->getChannelCount());

//        std::cerr << "mixing channel " << c << " from source channel " << sourceChannel << std::endl;

        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0 - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0;
            }
        }

        if (prevChannel != sourceChannel) {
            if (startFrame >= fadeIn/2) {
                got = dtvm->getData
                    (sourceChannel,
                     startFrame - fadeIn/2,
                     frames + fadeOut/2 + fadeIn/2,
                     channelBuffer);
            } else {
                size_t missing = fadeIn/2 - startFrame;
                got = dtvm->getData
                    (sourceChannel,
                     startFrame,
                     frames + fadeOut/2,
                     channelBuffer + missing);
            }
        }
        prevChannel = sourceChannel;

        for (size_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] += (channelGain * channelBuffer[i] * i) / fadeIn;
        }

        for (size_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * i) / fadeIn;
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
            }
            buffer[c][i] += mult * channelBuffer[i];
        }
    }

    return got;
}
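// Mix a sparse one-dimensional (instant) model by turning each point
// into a fixed-pitch note-on for the model's synth plugin, queueing a
// matching note-off a fixed number of frames later, and running the
// plugin one block at a time, adding its output into the mix.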
size_t
AudioGenerator::mixSparseOneDimensionalModel(SparseOneDimensionalModel *sodm,
                                             size_t startFrame, size_t frames,
                                             float **buffer, float gain, float pan,
                                             size_t /* fadeIn */,
                                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[sodm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    // buffer is a multiple of the plugin's buffer size doesn't mean
    // that we always get called for a multiple of it here (because it
    // also depends on the JACK block size).  how should we ensure that
    // all models write the same amount in to the mix, and that we
    // always have a multiple of the plugin buffer size?  I guess this
    // class has to be queryable for the plugin buffer size & the
    // callback play source has to use that as a multiple for all the
    // calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [sparse]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 100;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[sodm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        SparseOneDimensionalModel::PointList points =
            sodm->getPoints(reqStart + latency,
                            reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (SparseOneDimensionalModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [sparse]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = 7000; // frames [for now]
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [sparse]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [sparse]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}


//!!! mucho duplication with above -- refactor
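// As above, but for note models: the pitch of each note-on is taken
// from the point's value (interpreted as Hz or as a MIDI pitch number
// depending on the model's scale units), and the note-off is queued
// using the point's duration.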
size_t
AudioGenerator::mixNoteModel(NoteModel *nm,
                             size_t startFrame, size_t frames,
                             float **buffer, float gain, float pan,
                             size_t /* fadeIn */,
                             size_t /* fadeOut */)
{
    RealTimePluginInstance *plugin = m_synthMap[nm];
    if (!plugin) return 0;

    size_t latency = plugin->getLatency();
    size_t blocks = frames / m_pluginBlockSize;

    //!!! hang on -- the fact that the audio callback play source's
    // buffer is a multiple of the plugin's buffer size doesn't mean
    // that we always get called for a multiple of it here (because it
    // also depends on the JACK block size).  how should we ensure that
    // all models write the same amount in to the mix, and that we
    // always have a multiple of the plugin buffer size?  I guess this
    // class has to be queryable for the plugin buffer size & the
    // callback play source has to use that as a multiple for all the
    // calls to mixModel

    size_t got = blocks * m_pluginBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    std::cout << "mixModel [note]: frames " << frames
              << ", blocks " << blocks << std::endl;
#endif

    snd_seq_event_t onEv;
    onEv.type = SND_SEQ_EVENT_NOTEON;
    onEv.data.note.channel = 0;
    onEv.data.note.note = 64;
    onEv.data.note.velocity = 100;

    snd_seq_event_t offEv;
    offEv.type = SND_SEQ_EVENT_NOTEOFF;
    offEv.data.note.channel = 0;
    offEv.data.note.velocity = 0;

    NoteOffSet &noteOffs = m_noteOffs[nm];

    for (size_t i = 0; i < blocks; ++i) {

        size_t reqStart = startFrame + i * m_pluginBlockSize;

        NoteModel::PointList points =
            nm->getPoints(reqStart + latency,
                          reqStart + latency + m_pluginBlockSize);

        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
            (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);

        for (NoteModel::PointList::iterator pli =
                 points.begin(); pli != points.end(); ++pli) {

            size_t pliFrame = pli->frame;

            if (pliFrame >= latency) pliFrame -= latency;

            if (pliFrame < reqStart ||
                pliFrame >= reqStart + m_pluginBlockSize) continue;

            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= pliFrame) {

                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                    (noteOffs.begin()->frame, m_sourceSampleRate);

                offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
                std::cerr << "mixModel [note]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

                plugin->sendEvent(eventTime, &offEv);
                noteOffs.erase(noteOffs.begin());
            }

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (pliFrame, m_sourceSampleRate);

            if (nm->getScaleUnits() == "Hz") {
                onEv.data.note.note = Pitch::getPitchForFrequency(pli->value);
            } else {
                onEv.data.note.note = lrintf(pli->value);
            }

            plugin->sendEvent(eventTime, &onEv);

#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
#endif

            size_t duration = pli->duration;
            if (duration == 0 || duration == 1) {
                duration = m_sourceSampleRate / 20;
            }
            NoteOff noff;
            noff.pitch = onEv.data.note.note;
            noff.frame = pliFrame + duration;
            noteOffs.insert(noff);
        }

        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <=
               startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {

            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
                (noteOffs.begin()->frame, m_sourceSampleRate);

            offEv.data.note.note = noteOffs.begin()->pitch;

#ifdef DEBUG_AUDIO_GENERATOR
            std::cerr << "mixModel [note]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
#endif

            plugin->sendEvent(eventTime, &offEv);
            noteOffs.erase(noteOffs.begin());
        }

        plugin->run(blockTime);
        float **outs = plugin->getAudioOutputBuffers();

        for (size_t c = 0; c < m_targetChannelCount; ++c) {
#ifdef DEBUG_AUDIO_GENERATOR
            std::cout << "mixModel [note]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
#endif

            size_t sourceChannel = (c % plugin->getAudioOutputCount());

            float channelGain = gain;
            if (pan != 0.0) {
                if (c == 0) {
                    if (pan > 0.0) channelGain *= 1.0 - pan;
                } else {
                    if (pan < 0.0) channelGain *= pan + 1.0;
                }
            }

            for (size_t j = 0; j < m_pluginBlockSize; ++j) {
                buffer[c][i * m_pluginBlockSize + j] +=
                    channelGain * outs[sourceChannel][j];
            }
        }
    }

    return got;
}