svapp: changeset 582:b2d49e7c4149
Merge from branch 3.0-integration
| author | Chris Cannam |
| --- | --- |
| date | Fri, 13 Jan 2017 10:29:55 +0000 |
| parents | d65ce7e55346 (current diff) 8cc291b13f2b (diff) |
| children | 48cfa4e2bfc1 |
| files | audioio/AudioCallbackPlaySource.cpp audioio/AudioCallbackPlaySource.h audioio/AudioCallbackPlayTarget.cpp audioio/AudioCallbackPlayTarget.h audioio/AudioGenerator.cpp audioio/AudioGenerator.h audioio/AudioJACKTarget.cpp audioio/AudioJACKTarget.h audioio/AudioPortAudioTarget.cpp audioio/AudioPortAudioTarget.h audioio/AudioPulseAudioTarget.cpp audioio/AudioPulseAudioTarget.h audioio/AudioTargetFactory.cpp audioio/AudioTargetFactory.h audioio/ClipMixer.cpp audioio/ClipMixer.h audioio/ContinuousSynth.cpp audioio/ContinuousSynth.h audioio/PlaySpeedRangeMapper.cpp audioio/PlaySpeedRangeMapper.h |
| diffstat | 44 files changed, 5536 insertions(+), 6325 deletions(-) |
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioCallbackPlaySource.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,1833 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam and QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "AudioCallbackPlaySource.h" + +#include "AudioGenerator.h" + +#include "data/model/Model.h" +#include "base/ViewManagerBase.h" +#include "base/PlayParameterRepository.h" +#include "base/Preferences.h" +#include "data/model/DenseTimeValueModel.h" +#include "data/model/WaveFileModel.h" +#include "data/model/ReadOnlyWaveFileModel.h" +#include "data/model/SparseOneDimensionalModel.h" +#include "plugin/RealTimePluginInstance.h" + +#include "bqaudioio/SystemPlaybackTarget.h" +#include "bqaudioio/ResamplerWrapper.h" + +#include "bqvec/VectorOps.h" + +#include <rubberband/RubberBandStretcher.h> +using namespace RubberBand; + +using breakfastquay::v_zero_channels; + +#include <iostream> +#include <cassert> + +//#define DEBUG_AUDIO_PLAY_SOURCE 1 +//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1 + +static const int DEFAULT_RING_BUFFER_SIZE = 131071; + +AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager, + QString clientName) : + m_viewManager(manager), + m_audioGenerator(new AudioGenerator()), + m_clientName(clientName.toUtf8().data()), + m_readBuffers(0), + m_writeBuffers(0), + m_readBufferFill(0), + m_writeBufferFill(0), + m_bufferScavenger(1), + m_sourceChannelCount(0), + m_blockSize(1024), + m_sourceSampleRate(0), + m_deviceSampleRate(0), + m_deviceChannelCount(0), + m_playLatency(0), + m_target(0), + m_lastRetrievalTimestamp(0.0), + m_lastRetrievedBlockSize(0), + m_trustworthyTimestamps(true), + m_lastCurrentFrame(0), + m_playing(false), + m_exiting(false), + m_lastModelEndFrame(0), + m_ringBufferSize(DEFAULT_RING_BUFFER_SIZE), + m_outputLeft(0.0), + m_outputRight(0.0), + m_levelsSet(false), + m_auditioningPlugin(0), + m_auditioningPluginBypassed(false), + m_playStartFrame(0), + m_playStartFramePassed(false), + m_timeStretcher(0), + m_monoStretcher(0), + m_stretchRatio(1.0), + m_stretchMono(false), + m_stretcherInputCount(0), + m_stretcherInputs(0), + m_stretcherInputSizes(0), + m_fillThread(0), + m_resamplerWrapper(0) +{ + m_viewManager->setAudioPlaySource(this); + + connect(m_viewManager, SIGNAL(selectionChanged()), + this, SLOT(selectionChanged())); + connect(m_viewManager, SIGNAL(playLoopModeChanged()), + this, SLOT(playLoopModeChanged())); + connect(m_viewManager, SIGNAL(playSelectionModeChanged()), + this, SLOT(playSelectionModeChanged())); + + connect(this, SIGNAL(playStatusChanged(bool)), + m_viewManager, SLOT(playStatusChanged(bool))); + + connect(PlayParameterRepository::getInstance(), + SIGNAL(playParametersChanged(PlayParameters *)), + this, SLOT(playParametersChanged(PlayParameters *))); + + connect(Preferences::getInstance(), + SIGNAL(propertyChanged(PropertyContainer::PropertyName)), + this, SLOT(preferenceChanged(PropertyContainer::PropertyName))); +} + +AudioCallbackPlaySource::~AudioCallbackPlaySource() +{ +#ifdef 
DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource entering" << endl; +#endif + m_exiting = true; + + if (m_fillThread) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource dtor: awakening thread" << endl; +#endif + m_condition.wakeAll(); + m_fillThread->wait(); + delete m_fillThread; + } + + clearModels(); + + if (m_readBuffers != m_writeBuffers) { + delete m_readBuffers; + } + + delete m_writeBuffers; + + delete m_audioGenerator; + + for (int i = 0; i < m_stretcherInputCount; ++i) { + delete[] m_stretcherInputs[i]; + } + delete[] m_stretcherInputSizes; + delete[] m_stretcherInputs; + + delete m_timeStretcher; + delete m_monoStretcher; + + m_bufferScavenger.scavenge(true); + m_pluginScavenger.scavenge(true); +#ifdef DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource finishing" << endl; +#endif +} + +void +AudioCallbackPlaySource::addModel(Model *model) +{ + if (m_models.find(model) != m_models.end()) return; + + bool willPlay = m_audioGenerator->addModel(model); + + m_mutex.lock(); + + m_models.insert(model); + if (model->getEndFrame() > m_lastModelEndFrame) { + m_lastModelEndFrame = model->getEndFrame(); + } + + bool buffersIncreased = false, srChanged = false; + + int modelChannels = 1; + ReadOnlyWaveFileModel *rowfm = qobject_cast<ReadOnlyWaveFileModel *>(model); + if (rowfm) modelChannels = rowfm->getChannelCount(); + if (modelChannels > m_sourceChannelCount) { + m_sourceChannelCount = modelChannels; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl; +#endif + + if (m_sourceSampleRate == 0) { + + SVDEBUG << "AudioCallbackPlaySource::addModel: Source rate changing from 0 to " + << model->getSampleRate() << endl; + + m_sourceSampleRate = model->getSampleRate(); + srChanged = true; + + } else if (model->getSampleRate() != m_sourceSampleRate) { + + // If this is a read-only wave file model and we have no + // other, we can just switch to this model's sample rate + + if (rowfm) { + + bool conflicting = false; + + for (std::set<Model *>::const_iterator i = m_models.begin(); + i != m_models.end(); ++i) { + // Only read-only wave file models should be + // considered conflicting -- writable wave file models + // are derived and we shouldn't take their rates into + // account. 
Also, don't give any particular weight to + // a file that's already playing at the wrong rate + // anyway + ReadOnlyWaveFileModel *other = + qobject_cast<ReadOnlyWaveFileModel *>(*i); + if (other && other != rowfm && + other->getSampleRate() != model->getSampleRate() && + other->getSampleRate() == m_sourceSampleRate) { + SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl; + conflicting = true; + break; + } + } + + if (conflicting) { + + SVDEBUG << "AudioCallbackPlaySource::addModel: ERROR: " + << "New model sample rate does not match" << endl + << "existing model(s) (new " << model->getSampleRate() + << " vs " << m_sourceSampleRate + << "), playback will be wrong" + << endl; + + emit sampleRateMismatch(model->getSampleRate(), + m_sourceSampleRate, + false); + } else { + SVDEBUG << "AudioCallbackPlaySource::addModel: Source rate changing from " + << m_sourceSampleRate << " to " << model->getSampleRate() << endl; + + m_sourceSampleRate = model->getSampleRate(); + srChanged = true; + } + } + } + + if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) { + cerr << "m_writeBuffers size = " << (m_writeBuffers ? m_writeBuffers->size() : 0) << endl; + cerr << "target channel count = " << (getTargetChannelCount()) << endl; + clearRingBuffers(true, getTargetChannelCount()); + buffersIncreased = true; + } else { + if (willPlay) clearRingBuffers(true); + } + + if (srChanged) { + + SVCERR << "AudioCallbackPlaySource: Source rate changed" << endl; + + if (m_resamplerWrapper) { + SVCERR << "AudioCallbackPlaySource: Source sample rate changed to " + << m_sourceSampleRate << ", updating resampler wrapper" << endl; + m_resamplerWrapper->changeApplicationSampleRate + (int(round(m_sourceSampleRate))); + m_resamplerWrapper->reset(); + } + + delete m_timeStretcher; + delete m_monoStretcher; + m_timeStretcher = 0; + m_monoStretcher = 0; + + if (m_stretchRatio != 1.f) { + setTimeStretch(m_stretchRatio); + } + } + + rebuildRangeLists(); + + m_mutex.unlock(); + + m_audioGenerator->setTargetChannelCount(getTargetChannelCount()); + + if (buffersIncreased) { + SVDEBUG << "AudioCallbackPlaySource::addModel: Number of buffers increased to " << getTargetChannelCount() << endl; + if (getTargetChannelCount() > getDeviceChannelCount()) { + SVDEBUG << "AudioCallbackPlaySource::addModel: This is more than the device channel count, signalling channelCountIncreased" << endl; + emit channelCountIncreased(getTargetChannelCount()); + } else { + SVDEBUG << "AudioCallbackPlaySource::addModel: This is no more than the device channel count (" << getDeviceChannelCount() << "), so taking no action" << endl; + } + } + + if (!m_fillThread) { + m_fillThread = new FillThread(*this); + m_fillThread->start(); + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s)" << endl; +#endif + + connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), + this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl; +#endif + + m_condition.wakeAll(); +} + +void +AudioCallbackPlaySource::modelChangedWithin(sv_frame_t +#ifdef DEBUG_AUDIO_PLAY_SOURCE + startFrame +#endif + , sv_frame_t endFrame) +{ +#ifdef DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::modelChangedWithin(" << startFrame << "," << endFrame << ")" << endl; +#endif + if (endFrame > m_lastModelEndFrame) { + 
m_lastModelEndFrame = endFrame; + rebuildRangeLists(); + } +} + +void +AudioCallbackPlaySource::removeModel(Model *model) +{ + m_mutex.lock(); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl; +#endif + + disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), + this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); + + m_models.erase(model); + + // I don't think we have to do this any more: if a new model is + // loaded at a different rate, we'll hit the non-conflicting path + // in addModel and the rate will be updated without problems; but + // if a new model is loaded at the rate that we were using for the + // last one, then we save work by not having reset this here + // +// if (m_models.empty()) { +// m_sourceSampleRate = 0; +// } + + sv_frame_t lastEnd = 0; + for (std::set<Model *>::const_iterator i = m_models.begin(); + i != m_models.end(); ++i) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl; +#endif + if ((*i)->getEndFrame() > lastEnd) { + lastEnd = (*i)->getEndFrame(); + } +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "(done, lastEnd now " << lastEnd << ")" << endl; +#endif + } + m_lastModelEndFrame = lastEnd; + + m_audioGenerator->removeModel(model); + + m_mutex.unlock(); + + clearRingBuffers(); +} + +void +AudioCallbackPlaySource::clearModels() +{ + m_mutex.lock(); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::clearModels()" << endl; +#endif + + m_models.clear(); + + m_lastModelEndFrame = 0; + + m_sourceSampleRate = 0; + + m_mutex.unlock(); + + m_audioGenerator->clearModels(); + + clearRingBuffers(); +} + +void +AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count) +{ + if (!haveLock) m_mutex.lock(); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "clearRingBuffers" << endl; +#endif + + rebuildRangeLists(); + + if (count == 0) { + if (m_writeBuffers) count = int(m_writeBuffers->size()); + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "current playing frame = " << getCurrentPlayingFrame() << endl; + + cout << "write buffer fill (before) = " << m_writeBufferFill << endl; +#endif + + m_writeBufferFill = getCurrentBufferedFrame(); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "current buffered frame = " << m_writeBufferFill << endl; +#endif + + if (m_readBuffers != m_writeBuffers) { + delete m_writeBuffers; + } + + m_writeBuffers = new RingBufferVector; + + for (int i = 0; i < count; ++i) { + m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize)); + } + + m_audioGenerator->reset(); + +// cout << "AudioCallbackPlaySource::clearRingBuffers: Created " +// << count << " write buffers" << endl; + + if (!haveLock) { + m_mutex.unlock(); + } +} + +void +AudioCallbackPlaySource::play(sv_frame_t startFrame) +{ + if (!m_target) return; + + if (!m_sourceSampleRate) { + SVCERR << "AudioCallbackPlaySource::play: No source sample rate available, not playing" << endl; + return; + } + + if (m_viewManager->getPlaySelectionMode() && + !m_viewManager->getSelections().empty()) { + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = "; +#endif + + startFrame = m_viewManager->constrainFrameToSelection(startFrame); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << startFrame << endl; +#endif + + } else { + if (startFrame < 0) { + startFrame = 0; + } + if (startFrame >= m_lastModelEndFrame) { + startFrame = 0; + } + } + +#ifdef 
DEBUG_AUDIO_PLAY_SOURCE + cout << "play(" << startFrame << ") -> aligned playback model "; +#endif + + startFrame = m_viewManager->alignReferenceToPlaybackFrame(startFrame); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << startFrame << endl; +#endif + + // The fill thread will automatically empty its buffers before + // starting again if we have not so far been playing, but not if + // we're just re-seeking. + // NO -- we can end up playing some first -- always reset here + + m_mutex.lock(); + + if (m_timeStretcher) { + m_timeStretcher->reset(); + } + if (m_monoStretcher) { + m_monoStretcher->reset(); + } + + m_readBufferFill = m_writeBufferFill = startFrame; + if (m_readBuffers) { + for (int c = 0; c < getTargetChannelCount(); ++c) { + RingBuffer<float> *rb = getReadRingBuffer(c); +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "reset ring buffer for channel " << c << endl; +#endif + if (rb) rb->reset(); + } + } + + m_mutex.unlock(); + + m_audioGenerator->reset(); + + m_playStartFrame = startFrame; + m_playStartFramePassed = false; + m_playStartedAt = RealTime::zeroTime; + if (m_target) { + m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime()); + } + + bool changed = !m_playing; + m_lastRetrievalTimestamp = 0; + m_lastCurrentFrame = 0; + m_playing = true; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::play: awakening thread" << endl; +#endif + + m_condition.wakeAll(); + if (changed) { + emit playStatusChanged(m_playing); + emit activity(tr("Play from %1").arg + (RealTime::frame2RealTime + (m_playStartFrame, m_sourceSampleRate).toText().c_str())); + } +} + +void +AudioCallbackPlaySource::stop() +{ +#ifdef DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::stop()" << endl; +#endif + bool changed = m_playing; + m_playing = false; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::stop: awakening thread" << endl; +#endif + + m_condition.wakeAll(); + m_lastRetrievalTimestamp = 0; + if (changed) { + emit playStatusChanged(m_playing); + emit activity(tr("Stop at %1").arg + (RealTime::frame2RealTime + (m_lastCurrentFrame, m_sourceSampleRate).toText().c_str())); + } + m_lastCurrentFrame = 0; +} + +void +AudioCallbackPlaySource::selectionChanged() +{ + if (m_viewManager->getPlaySelectionMode()) { + clearRingBuffers(); + } +} + +void +AudioCallbackPlaySource::playLoopModeChanged() +{ + clearRingBuffers(); +} + +void +AudioCallbackPlaySource::playSelectionModeChanged() +{ + if (!m_viewManager->getSelections().empty()) { + clearRingBuffers(); + } +} + +void +AudioCallbackPlaySource::playParametersChanged(PlayParameters *) +{ + clearRingBuffers(); +} + +void +AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName ) +{ +} + +void +AudioCallbackPlaySource::audioProcessingOverload() +{ + SVCERR << "Audio processing overload!" 
<< endl; + + if (!m_playing) return; + + RealTimePluginInstance *ap = m_auditioningPlugin; + if (ap && !m_auditioningPluginBypassed) { + m_auditioningPluginBypassed = true; + emit audioOverloadPluginDisabled(); + return; + } + + if (m_timeStretcher && + m_timeStretcher->getTimeRatio() < 1.0 && + m_stretcherInputCount > 1 && + m_monoStretcher && !m_stretchMono) { + m_stretchMono = true; + emit audioTimeStretchMultiChannelDisabled(); + return; + } +} + +void +AudioCallbackPlaySource::setSystemPlaybackTarget(breakfastquay::SystemPlaybackTarget *target) +{ + if (target == 0) { + // reset target-related facts and figures + m_deviceSampleRate = 0; + m_deviceChannelCount = 0; + } + m_target = target; +} + +void +AudioCallbackPlaySource::setResamplerWrapper(breakfastquay::ResamplerWrapper *w) +{ + m_resamplerWrapper = w; + if (m_resamplerWrapper && m_sourceSampleRate != 0) { + m_resamplerWrapper->changeApplicationSampleRate + (int(round(m_sourceSampleRate))); + } +} + +void +AudioCallbackPlaySource::setSystemPlaybackBlockSize(int size) +{ + cout << "AudioCallbackPlaySource::setTarget: Block size -> " << size << endl; + if (size != 0) { + m_blockSize = size; + } + if (size * 4 > m_ringBufferSize) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::setTarget: Buffer size " + << size << " > a quarter of ring buffer size " + << m_ringBufferSize << ", calling for more ring buffer" + << endl; +#endif + m_ringBufferSize = size * 4; + if (m_writeBuffers && !m_writeBuffers->empty()) { + clearRingBuffers(); + } + } +} + +int +AudioCallbackPlaySource::getTargetBlockSize() const +{ +// cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl; + return int(m_blockSize); +} + +void +AudioCallbackPlaySource::setSystemPlaybackLatency(int latency) +{ + m_playLatency = latency; +} + +sv_frame_t +AudioCallbackPlaySource::getTargetPlayLatency() const +{ + return m_playLatency; +} + +sv_frame_t +AudioCallbackPlaySource::getCurrentPlayingFrame() +{ + // This method attempts to estimate which audio sample frame is + // "currently coming through the speakers". + + sv_samplerate_t deviceRate = getDeviceSampleRate(); + sv_frame_t latency = m_playLatency; // at target rate + RealTime latency_t = RealTime::zeroTime; + + if (deviceRate != 0) { + latency_t = RealTime::frame2RealTime(latency, deviceRate); + } + + return getCurrentFrame(latency_t); +} + +sv_frame_t +AudioCallbackPlaySource::getCurrentBufferedFrame() +{ + return getCurrentFrame(RealTime::zeroTime); +} + +sv_frame_t +AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t) +{ + // The ring buffers contain data at the source sample rate and all + // processing (including time stretching) happens at this + // rate. Resampling only happens after the audio data leaves this + // class. + + // (But because historically more than one sample rate could have + // been involved here, we do latency calculations using RealTime + // values instead of samples.) 
+ + sv_samplerate_t rate = getSourceSampleRate(); + + if (rate == 0) return 0; + + int inbuffer = 0; // at target rate + + for (int c = 0; c < getTargetChannelCount(); ++c) { + RingBuffer<float> *rb = getReadRingBuffer(c); + if (rb) { + int here = rb->getReadSpace(); + if (c == 0 || here < inbuffer) inbuffer = here; + } + } + + sv_frame_t readBufferFill = m_readBufferFill; + sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize; + double lastRetrievalTimestamp = m_lastRetrievalTimestamp; + double currentTime = 0.0; + if (m_target) currentTime = m_target->getCurrentTime(); + + bool looping = m_viewManager->getPlayLoopMode(); + + RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, rate); + + sv_frame_t stretchlat = 0; + double timeRatio = 1.0; + + if (m_timeStretcher) { + stretchlat = m_timeStretcher->getLatency(); + timeRatio = m_timeStretcher->getTimeRatio(); + } + + RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, rate); + + // When the target has just requested a block from us, the last + // sample it obtained was our buffer fill frame count minus the + // amount of read space (converted back to source sample rate) + // remaining now. That sample is not expected to be played until + // the target's play latency has elapsed. By the time the + // following block is requested, that sample will be at the + // target's play latency minus the last requested block size away + // from being played. + + RealTime sincerequest_t = RealTime::zeroTime; + RealTime lastretrieved_t = RealTime::zeroTime; + + if (m_target && + m_trustworthyTimestamps && + lastRetrievalTimestamp != 0.0) { + + lastretrieved_t = RealTime::frame2RealTime(lastRetrievedBlockSize, rate); + + // calculate number of frames at target rate that have elapsed + // since the end of the last call to getSourceSamples + + if (m_trustworthyTimestamps && !looping) { + + // this adjustment seems to cause more problems when looping + double elapsed = currentTime - lastRetrievalTimestamp; + + if (elapsed > 0.0) { + sincerequest_t = RealTime::fromSeconds(elapsed); + } + } + + } else { + + lastretrieved_t = RealTime::frame2RealTime(getTargetBlockSize(), rate); + } + + RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, rate); + + if (timeRatio != 1.0) { + lastretrieved_t = lastretrieved_t / timeRatio; + sincerequest_t = sincerequest_t / timeRatio; + latency_t = latency_t / timeRatio; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl; +#endif + + // Normally the range lists should contain at least one item each + // -- if playback is unconstrained, that item should report the + // entire source audio duration. 
+ + if (m_rangeStarts.empty()) { + rebuildRangeLists(); + } + + if (m_rangeStarts.empty()) { + // this code is only used in case of error in rebuildRangeLists + RealTime playing_t = bufferedto_t + - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t + + sincerequest_t; + if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; + sv_frame_t frame = RealTime::realTime2Frame(playing_t, rate); + return m_viewManager->alignPlaybackFrameToReference(frame); + } + + int inRange = 0; + int index = 0; + + for (int i = 0; i < (int)m_rangeStarts.size(); ++i) { + if (bufferedto_t >= m_rangeStarts[i]) { + inRange = index; + } else { + break; + } + ++index; + } + + if (inRange >= int(m_rangeStarts.size())) { + inRange = int(m_rangeStarts.size())-1; + } + + RealTime playing_t = bufferedto_t; + + playing_t = playing_t + - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t + + sincerequest_t; + + // This rather gross little hack is used to ensure that latency + // compensation doesn't result in the playback pointer appearing + // to start earlier than the actual playback does. It doesn't + // work properly (hence the bail-out in the middle) because if we + // are playing a relatively short looped region, the playing time + // estimated from the buffer fill frame may have wrapped around + // the region boundary and end up being much smaller than the + // theoretical play start frame, perhaps even for the entire + // duration of playback! + + if (!m_playStartFramePassed) { + RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame, rate); + if (playing_t < playstart_t) { +// cout << "playing_t " << playing_t << " < playstart_t " +// << playstart_t << endl; + if (/*!!! sincerequest_t > RealTime::zeroTime && */ + m_playStartedAt + latency_t + stretchlat_t < + RealTime::fromSeconds(currentTime)) { +// cout << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << endl; + m_playStartFramePassed = true; + } else { + playing_t = playstart_t; + } + } else { + m_playStartFramePassed = true; + } + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "playing_t " << playing_t; +#endif + + playing_t = playing_t - m_rangeStarts[inRange]; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << " as offset into range " << inRange << " (start =" << m_rangeStarts[inRange] << " duration =" << m_rangeDurations[inRange] << ") = " << playing_t << endl; +#endif + + while (playing_t < RealTime::zeroTime) { + + if (inRange == 0) { + if (looping) { + inRange = int(m_rangeStarts.size()) - 1; + } else { + break; + } + } else { + --inRange; + } + + playing_t = playing_t + m_rangeDurations[inRange]; + } + + playing_t = playing_t + m_rangeStarts[inRange]; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << " playing time: " << playing_t << endl; +#endif + + if (!looping) { + if (inRange == (int)m_rangeStarts.size()-1 && + playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) { +cout << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl; + stop(); + } + } + + if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; + + sv_frame_t frame = RealTime::realTime2Frame(playing_t, rate); + + if (m_lastCurrentFrame > 0 && !looping) { + if (frame < m_lastCurrentFrame) { + frame = m_lastCurrentFrame; + } + } + + m_lastCurrentFrame = frame; + + return 
m_viewManager->alignPlaybackFrameToReference(frame); +} + +void +AudioCallbackPlaySource::rebuildRangeLists() +{ + bool constrained = (m_viewManager->getPlaySelectionMode()); + + m_rangeStarts.clear(); + m_rangeDurations.clear(); + + sv_samplerate_t sourceRate = getSourceSampleRate(); + if (sourceRate == 0) return; + + RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate); + if (end == RealTime::zeroTime) return; + + if (!constrained) { + m_rangeStarts.push_back(RealTime::zeroTime); + m_rangeDurations.push_back(end); + return; + } + + MultiSelection::SelectionList selections = m_viewManager->getSelections(); + MultiSelection::SelectionList::const_iterator i; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + SVDEBUG << "AudioCallbackPlaySource::rebuildRangeLists" << endl; +#endif + + if (!selections.empty()) { + + for (i = selections.begin(); i != selections.end(); ++i) { + + RealTime start = + (RealTime::frame2RealTime + (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), + sourceRate)); + RealTime duration = + (RealTime::frame2RealTime + (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) - + m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), + sourceRate)); + + m_rangeStarts.push_back(start); + m_rangeDurations.push_back(duration); + } + } else { + m_rangeStarts.push_back(RealTime::zeroTime); + m_rangeDurations.push_back(end); + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "Now have " << m_rangeStarts.size() << " play ranges" << endl; +#endif +} + +void +AudioCallbackPlaySource::setOutputLevels(float left, float right) +{ + if (left > m_outputLeft) m_outputLeft = left; + if (right > m_outputRight) m_outputRight = right; + m_levelsSet = true; +} + +bool +AudioCallbackPlaySource::getOutputLevels(float &left, float &right) +{ + left = m_outputLeft; + right = m_outputRight; + bool valid = m_levelsSet; + m_outputLeft = 0.f; + m_outputRight = 0.f; + m_levelsSet = false; + return valid; +} + +void +AudioCallbackPlaySource::setSystemPlaybackSampleRate(int sr) +{ + m_deviceSampleRate = sr; +} + +void +AudioCallbackPlaySource::setSystemPlaybackChannelCount(int count) +{ + m_deviceChannelCount = count; +} + +void +AudioCallbackPlaySource::setAuditioningEffect(Auditionable *a) +{ + RealTimePluginInstance *plugin = dynamic_cast<RealTimePluginInstance *>(a); + if (a && !plugin) { + SVCERR << "WARNING: AudioCallbackPlaySource::setAuditioningEffect: auditionable object " << a << " is not a real-time plugin instance" << endl; + } + + m_mutex.lock(); + m_auditioningPlugin = plugin; + m_auditioningPluginBypassed = false; + m_mutex.unlock(); +} + +void +AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s) +{ + m_audioGenerator->setSoloModelSet(s); + clearRingBuffers(); +} + +void +AudioCallbackPlaySource::clearSoloModelSet() +{ + m_audioGenerator->clearSoloModelSet(); + clearRingBuffers(); +} + +sv_samplerate_t +AudioCallbackPlaySource::getDeviceSampleRate() const +{ + return m_deviceSampleRate; +} + +int +AudioCallbackPlaySource::getSourceChannelCount() const +{ + return m_sourceChannelCount; +} + +int +AudioCallbackPlaySource::getTargetChannelCount() const +{ + if (m_sourceChannelCount < 2) return 2; + return m_sourceChannelCount; +} + +int +AudioCallbackPlaySource::getDeviceChannelCount() const +{ + return m_deviceChannelCount; +} + +sv_samplerate_t +AudioCallbackPlaySource::getSourceSampleRate() const +{ + return m_sourceSampleRate; +} + +void +AudioCallbackPlaySource::setTimeStretch(double factor) +{ + m_stretchRatio = factor; + + int rate = 
int(getSourceSampleRate()); + if (!rate) return; // have to make our stretcher later + + if (m_timeStretcher || (factor == 1.0)) { + // stretch ratio will be set in next process call if appropriate + } else { + m_stretcherInputCount = getTargetChannelCount(); + RubberBandStretcher *stretcher = new RubberBandStretcher + (rate, + m_stretcherInputCount, + RubberBandStretcher::OptionProcessRealTime, + factor); + RubberBandStretcher *monoStretcher = new RubberBandStretcher + (rate, + 1, + RubberBandStretcher::OptionProcessRealTime, + factor); + m_stretcherInputs = new float *[m_stretcherInputCount]; + m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount]; + for (int c = 0; c < m_stretcherInputCount; ++c) { + m_stretcherInputSizes[c] = 16384; + m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; + } + m_monoStretcher = monoStretcher; + m_timeStretcher = stretcher; + } + + emit activity(tr("Change time-stretch factor to %1").arg(factor)); +} + +int +AudioCallbackPlaySource::getSourceSamples(float *const *buffer, + int requestedChannels, + int count) +{ + // In principle, the target will handle channel mapping in cases + // where our channel count differs from the device's. But that + // only holds if our channel count doesn't change -- i.e. if + // getApplicationChannelCount() always returns the same value as + // it did when the target was created, and if this function always + // returns that number of channels. + // + // Unfortunately that can't hold for us -- we always have at least + // 2 channels but if the user opens a new main model with more + // channels than that (and more than the last main model) then our + // target channel count necessarily gets increased. + // + // We have: + // + // getSourceChannelCount() -> number of channels available to + // provide from real model data + // + // getTargetChannelCount() -> number we will actually provide; + // same as getSourceChannelCount() except that it is always at + // least 2 + // + // getDeviceChannelCount() -> number the device will emit, usually + // equal to the value of getTargetChannelCount() at the time the + // device was initialised, unless the device could not provide + // that number + // + // requestedChannels -> number the device is expecting from us, + // always equal to the value of getTargetChannelCount() at the + // time the device was initialised + // + // If the requested channel count is at least the target channel + // count, then we go ahead and provide the target channels as + // expected. We just zero any spare channels. + // + // If the requested channel count is smaller than the target + // channel count, then we don't know what to do and we provide + // nothing. This shouldn't happen as long as management is on the + // ball -- we emit channelCountIncreased() when the target channel + // count increases, and whatever code "owns" the driver should + // have reopened the audio device when it got that signal. But + // there's a race condition there, which we accommodate with this + // check. 
+ + int channels = getTargetChannelCount(); + + if (!m_playing) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl; +#endif + v_zero_channels(buffer, requestedChannels, count); + return 0; + } + if (requestedChannels < channels) { + SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Not enough device channels (" << requestedChannels << ", need " << channels << "); hoping device is about to be reopened" << endl; + v_zero_channels(buffer, requestedChannels, count); + return 0; + } + if (requestedChannels > channels) { + v_zero_channels(buffer + channels, requestedChannels - channels, count); + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "AudioCallbackPlaySource::getSourceSamples: Playing" << endl; +#endif + + // Ensure that all buffers have at least the amount of data we + // need -- else reduce the size of our requests correspondingly + + for (int ch = 0; ch < channels; ++ch) { + + RingBuffer<float> *rb = getReadRingBuffer(ch); + + if (!rb) { + SVCERR << "WARNING: AudioCallbackPlaySource::getSourceSamples: " + << "No ring buffer available for channel " << ch + << ", returning no data here" << endl; + count = 0; + break; + } + + int rs = rb->getReadSpace(); + if (rs < count) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " + << "Ring buffer for channel " << ch << " has only " + << rs << " (of " << count << ") samples available (" + << "ring buffer size is " << rb->getSize() << ", write " + << "space " << rb->getWriteSpace() << "), " + << "reducing request size" << endl; +#endif + count = rs; + } + } + + if (count == 0) return 0; + + RubberBandStretcher *ts = m_timeStretcher; + RubberBandStretcher *ms = m_monoStretcher; + + double ratio = ts ? 
ts->getTimeRatio() : 1.0; + + if (ratio != m_stretchRatio) { + if (!ts) { + SVCERR << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl; + m_stretchRatio = 1.0; + } else { + ts->setTimeRatio(m_stretchRatio); + if (ms) ms->setTimeRatio(m_stretchRatio); + if (m_stretchRatio >= 1.0) m_stretchMono = false; + } + } + + int stretchChannels = m_stretcherInputCount; + if (m_stretchMono) { + if (ms) { + ts = ms; + stretchChannels = 1; + } else { + m_stretchMono = false; + } + } + + if (m_target) { + m_lastRetrievedBlockSize = count; + m_lastRetrievalTimestamp = m_target->getCurrentTime(); + } + + if (!ts || ratio == 1.f) { + + int got = 0; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "channels == " << channels << endl; +#endif + + for (int ch = 0; ch < channels; ++ch) { + + RingBuffer<float> *rb = getReadRingBuffer(ch); + + if (rb) { + + // this is marginally more likely to leave our channels in + // sync after a processing failure than just passing "count": + sv_frame_t request = count; + if (ch > 0) request = got; + + got = rb->read(buffer[ch], int(request)); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl; +#endif + } + + for (int ch = 0; ch < channels; ++ch) { + for (int i = got; i < count; ++i) { + buffer[ch][i] = 0.0; + } + } + } + + applyAuditioningEffect(count, buffer); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::getSamples: awakening thread" << endl; +#endif + + m_condition.wakeAll(); + + return got; + } + + sv_frame_t available; + sv_frame_t fedToStretcher = 0; + int warned = 0; + + // The input block for a given output is approx output / ratio, + // but we can't predict it exactly, for an adaptive timestretcher. 
+ + while ((available = ts->available()) < count) { + + sv_frame_t reqd = lrint(double(count - available) / ratio); + reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired())); + if (reqd == 0) reqd = 1; + + sv_frame_t got = reqd; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl; +#endif + + for (int c = 0; c < channels; ++c) { + if (c >= m_stretcherInputCount) continue; + if (reqd > m_stretcherInputSizes[c]) { + if (c == 0) { + SVDEBUG << "NOTE: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << endl; + } + delete[] m_stretcherInputs[c]; + m_stretcherInputSizes[c] = reqd * 2; + m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; + } + } + + for (int c = 0; c < channels; ++c) { + if (c >= m_stretcherInputCount) continue; + RingBuffer<float> *rb = getReadRingBuffer(c); + if (rb) { + sv_frame_t gotHere; + if (stretchChannels == 1 && c > 0) { + gotHere = rb->readAdding(m_stretcherInputs[0], int(got)); + } else { + gotHere = rb->read(m_stretcherInputs[c], int(got)); + } + if (gotHere < got) got = gotHere; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + if (c == 0) { + cout << "feeding stretcher: got " << gotHere + << ", " << rb->getReadSpace() << " remain" << endl; + } +#endif + + } else { + SVCERR << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << endl; + } + } + + if (got < reqd) { + SVCERR << "WARNING: Read underrun in playback (" + << got << " < " << reqd << ")" << endl; + } + + ts->process(m_stretcherInputs, size_t(got), false); + + fedToStretcher += got; + + if (got == 0) break; + + if (ts->available() == available) { + SVCERR << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << endl; + if (++warned == 5) break; + } + } + + ts->retrieve(buffer, size_t(count)); + + v_zero_channels(buffer + stretchChannels, channels - stretchChannels, count); + + applyAuditioningEffect(count, buffer); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySource::getSamples [stretched]: awakening thread" << endl; +#endif + + m_condition.wakeAll(); + + return count; +} + +void +AudioCallbackPlaySource::applyAuditioningEffect(sv_frame_t count, float *const *buffers) +{ + if (m_auditioningPluginBypassed) return; + RealTimePluginInstance *plugin = m_auditioningPlugin; + if (!plugin) return; + + if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) { +// cout << "plugin input count " << plugin->getAudioInputCount() +// << " != our channel count " << getTargetChannelCount() +// << endl; + return; + } + if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) { +// cout << "plugin output count " << plugin->getAudioOutputCount() +// << " != our channel count " << getTargetChannelCount() +// << endl; + return; + } + if ((int)plugin->getBufferSize() < count) { +// cout << "plugin buffer size " << plugin->getBufferSize() +// << " < our block size " << count +// << endl; + return; + } + + float **ib = plugin->getAudioInputBuffers(); + float **ob = plugin->getAudioOutputBuffers(); + + for (int c = 0; c < getTargetChannelCount(); ++c) { + for (int i = 0; i < count; ++i) { + ib[c][i] = buffers[c][i]; + } + } + + plugin->run(Vamp::RealTime::zeroTime, int(count)); + + for (int c = 0; c < getTargetChannelCount(); ++c) { + for (int i = 0; i < count; ++i) { + buffers[c][i] = ob[c][i]; + } + } +} + +// 
Called from fill thread, m_playing true, mutex held +bool +AudioCallbackPlaySource::fillBuffers() +{ + static float *tmp = 0; + static sv_frame_t tmpSize = 0; + + sv_frame_t space = 0; + for (int c = 0; c < getTargetChannelCount(); ++c) { + RingBuffer<float> *wb = getWriteRingBuffer(c); + if (wb) { + sv_frame_t spaceHere = wb->getWriteSpace(); + if (c == 0 || spaceHere < space) space = spaceHere; + } + } + + if (space == 0) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread: no space to fill" << endl; +#endif + return false; + } + + // space is now the number of samples that can be written on each + // channel's write ringbuffer + + sv_frame_t f = m_writeBufferFill; + + bool readWriteEqual = (m_readBuffers == m_writeBuffers); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + if (!readWriteEqual) { + cout << "AudioCallbackPlaySourceFillThread: note read buffers != write buffers" << endl; + } + cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << endl; +#endif + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "buffered to " << f << " already" << endl; +#endif + + int channels = getTargetChannelCount(); + + static float **bufferPtrs = 0; + static int bufferPtrCount = 0; + + if (bufferPtrCount < channels) { + if (bufferPtrs) delete[] bufferPtrs; + bufferPtrs = new float *[channels]; + bufferPtrCount = channels; + } + + sv_frame_t generatorBlockSize = m_audioGenerator->getBlockSize(); + + // space must be a multiple of generatorBlockSize + sv_frame_t reqSpace = space; + space = (reqSpace / generatorBlockSize) * generatorBlockSize; + if (space == 0) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "requested fill of " << reqSpace + << " is less than generator block size of " + << generatorBlockSize << ", leaving it" << endl; +#endif + return false; + } + + if (tmpSize < channels * space) { + delete[] tmp; + tmp = new float[channels * space]; + tmpSize = channels * space; + } + + for (int c = 0; c < channels; ++c) { + + bufferPtrs[c] = tmp + c * space; + + for (int i = 0; i < space; ++i) { + tmp[c * space + i] = 0.0f; + } + } + + sv_frame_t got = mixModels(f, space, bufferPtrs); // also modifies f + + for (int c = 0; c < channels; ++c) { + + RingBuffer<float> *wb = getWriteRingBuffer(c); + if (wb) { + int actual = wb->write(bufferPtrs[c], int(got)); +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "Wrote " << actual << " samples for ch " << c << ", now " + << wb->getReadSpace() << " to read" + << endl; +#endif + if (actual < got) { + SVCERR << "WARNING: Buffer overrun in channel " << c + << ": wrote " << actual << " of " << got + << " samples" << endl; + } + } + } + + m_writeBufferFill = f; + if (readWriteEqual) m_readBufferFill = f; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "Read buffer fill is now " << m_readBufferFill << ", write buffer fill " + << m_writeBufferFill << endl; +#endif + + //!!! how do we know when ended? 
need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples + + return true; +} + +sv_frame_t +AudioCallbackPlaySource::mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers) +{ + sv_frame_t processed = 0; + sv_frame_t chunkStart = frame; + sv_frame_t chunkSize = count; + sv_frame_t selectionSize = 0; + sv_frame_t nextChunkStart = chunkStart + chunkSize; + + bool looping = m_viewManager->getPlayLoopMode(); + bool constrained = (m_viewManager->getPlaySelectionMode() && + !m_viewManager->getSelections().empty()); + + int channels = getTargetChannelCount(); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "mixModels: start " << frame << ", size " << count << ", channels " << channels << endl; +#endif +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + if (constrained) { + cout << "Manager has " << m_viewManager->getSelections().size() << " selection(s):" << endl; + for (auto sel: m_viewManager->getSelections()) { + cout << sel.getStartFrame() << " -> " << sel.getEndFrame() + << " (" << (sel.getEndFrame() - sel.getStartFrame()) << " frames)" + << endl; + } + } +#endif + + static float **chunkBufferPtrs = 0; + static int chunkBufferPtrCount = 0; + + if (chunkBufferPtrCount < channels) { + if (chunkBufferPtrs) delete[] chunkBufferPtrs; + chunkBufferPtrs = new float *[channels]; + chunkBufferPtrCount = channels; + } + + for (int c = 0; c < channels; ++c) { + chunkBufferPtrs[c] = buffers[c]; + } + + while (processed < count) { + + chunkSize = count - processed; + nextChunkStart = chunkStart + chunkSize; + selectionSize = 0; + + sv_frame_t fadeIn = 0, fadeOut = 0; + + if (constrained) { + + sv_frame_t rChunkStart = + m_viewManager->alignPlaybackFrameToReference(chunkStart); + + Selection selection = + m_viewManager->getContainingSelection(rChunkStart, true); + + if (selection.isEmpty()) { + if (looping) { + selection = *m_viewManager->getSelections().begin(); + chunkStart = m_viewManager->alignReferenceToPlaybackFrame + (selection.getStartFrame()); + fadeIn = 50; + } + } + + if (selection.isEmpty()) { + + chunkSize = 0; + nextChunkStart = chunkStart; + + } else { + + sv_frame_t sf = m_viewManager->alignReferenceToPlaybackFrame + (selection.getStartFrame()); + sv_frame_t ef = m_viewManager->alignReferenceToPlaybackFrame + (selection.getEndFrame()); + + selectionSize = ef - sf; + + if (chunkStart < sf) { + chunkStart = sf; + fadeIn = 50; + } + + nextChunkStart = chunkStart + chunkSize; + + if (nextChunkStart >= ef) { + nextChunkStart = ef; + fadeOut = 50; + } + + chunkSize = nextChunkStart - chunkStart; + } + + } else if (looping && m_lastModelEndFrame > 0) { + + if (chunkStart >= m_lastModelEndFrame) { + chunkStart = 0; + } + if (chunkSize > m_lastModelEndFrame - chunkStart) { + chunkSize = m_lastModelEndFrame - chunkStart; + } + nextChunkStart = chunkStart + chunkSize; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << endl; +#endif + + if (!chunkSize) { + // We need to maintain full buffers so that the other + // thread can tell where it's got to in the playback -- so + // return the full amount here + frame = frame + count; + if (frame < nextChunkStart) { + frame = nextChunkStart; + } +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "mixModels: ending at " << nextChunkStart << ", returning frame as " + << frame << endl; +#endif + return count; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + 
cout << "mixModels: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl; +#endif + + if (selectionSize < 100) { + fadeIn = 0; + fadeOut = 0; + } else if (selectionSize < 300) { + if (fadeIn > 0) fadeIn = 10; + if (fadeOut > 0) fadeOut = 10; + } + + if (fadeIn > 0) { + if (processed * 2 < fadeIn) { + fadeIn = processed * 2; + } + } + + if (fadeOut > 0) { + if ((count - processed - chunkSize) * 2 < fadeOut) { + fadeOut = (count - processed - chunkSize) * 2; + } + } + + for (std::set<Model *>::iterator mi = m_models.begin(); + mi != m_models.end(); ++mi) { + + (void) m_audioGenerator->mixModel(*mi, chunkStart, + chunkSize, chunkBufferPtrs, + fadeIn, fadeOut); + } + + for (int c = 0; c < channels; ++c) { + chunkBufferPtrs[c] += chunkSize; + } + + processed += chunkSize; + chunkStart = nextChunkStart; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "mixModels returning " << processed << " frames to " << nextChunkStart << endl; +#endif + + frame = nextChunkStart; + return processed; +} + +void +AudioCallbackPlaySource::unifyRingBuffers() +{ + if (m_readBuffers == m_writeBuffers) return; + + // only unify if there will be something to read + for (int c = 0; c < getTargetChannelCount(); ++c) { + RingBuffer<float> *wb = getWriteRingBuffer(c); + if (wb) { + if (wb->getReadSpace() < m_blockSize * 2) { + if ((m_writeBufferFill + m_blockSize * 2) < + m_lastModelEndFrame) { + // OK, we don't have enough and there's more to + // read -- don't unify until we can do better +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "AudioCallbackPlaySource::unifyRingBuffers: Not unifying: write buffer has less (" << wb->getReadSpace() << ") than " << m_blockSize*2 << " to read and write buffer fill (" << m_writeBufferFill << ") is not close to end frame (" << m_lastModelEndFrame << ")" << endl; +#endif + return; + } + } + break; + } + } + + sv_frame_t rf = m_readBufferFill; + RingBuffer<float> *rb = getReadRingBuffer(0); + if (rb) { + int rs = rb->getReadSpace(); + //!!! 
incorrect when in non-contiguous selection, see comments elsewhere +// cout << "rs = " << rs << endl; + if (rs < rf) rf -= rs; + else rf = 0; + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl; +#endif + + sv_frame_t wf = m_writeBufferFill; + sv_frame_t skip = 0; + for (int c = 0; c < getTargetChannelCount(); ++c) { + RingBuffer<float> *wb = getWriteRingBuffer(c); + if (wb) { + if (c == 0) { + + int wrs = wb->getReadSpace(); +// cout << "wrs = " << wrs << endl; + + if (wrs < wf) wf -= wrs; + else wf = 0; +// cout << "wf = " << wf << endl; + + if (wf < rf) skip = rf - wf; + if (skip == 0) break; + } + +// cout << "skipping " << skip << endl; + wb->skip(int(skip)); + } + } + + m_bufferScavenger.claim(m_readBuffers); + m_readBuffers = m_writeBuffers; + m_readBufferFill = m_writeBufferFill; +#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING + cout << "unified" << endl; +#endif +} + +void +AudioCallbackPlaySource::FillThread::run() +{ + AudioCallbackPlaySource &s(m_source); + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread starting" << endl; +#endif + + s.m_mutex.lock(); + + bool previouslyPlaying = s.m_playing; + bool work = false; + + while (!s.m_exiting) { + + s.unifyRingBuffers(); + s.m_bufferScavenger.scavenge(); + s.m_pluginScavenger.scavenge(); + + if (work && s.m_playing && s.getSourceSampleRate()) { + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread: not waiting" << endl; +#endif + + s.m_mutex.unlock(); + s.m_mutex.lock(); + + } else { + + double ms = 100; + if (s.getSourceSampleRate() > 0) { + ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0; + } + + if (s.m_playing) ms /= 10; + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + if (!s.m_playing) cout << endl; + cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << endl; +#endif + + s.m_condition.wait(&s.m_mutex, int(ms)); + } + +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread: awoken" << endl; +#endif + + work = false; + + if (!s.getSourceSampleRate()) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread: source sample rate is zero" << endl; +#endif + continue; + } + + bool playing = s.m_playing; + + if (playing && !previouslyPlaying) { +#ifdef DEBUG_AUDIO_PLAY_SOURCE + cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl; +#endif + for (int c = 0; c < s.getTargetChannelCount(); ++c) { + RingBuffer<float> *rb = s.getReadRingBuffer(c); + if (rb) rb->reset(); + } + } + previouslyPlaying = playing; + + work = s.fillBuffers(); + } + + s.m_mutex.unlock(); +} +
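
As a point of reference for the class added above (and not part of the changeset itself), the sketch below shows roughly how a callback-style driver would be expected to talk to AudioCallbackPlaySource, using only methods declared in the header diff that follows: the driver reports its fixed device parameters once, then pulls non-interleaved sample blocks from its realtime callback via getSourceSamples(). The function names and the concrete device figures here are illustrative assumptions, not code from this repository.

```cpp
// Illustrative sketch only -- not part of this changeset. Assumes "source"
// is an AudioCallbackPlaySource already constructed with a ViewManagerBase
// and populated via addModel(); the device values below are examples.

#include "audio/AudioCallbackPlaySource.h"

// Called once by a hypothetical driver after it has opened the device.
void describeDevice(AudioCallbackPlaySource &source)
{
    source.setSystemPlaybackSampleRate(44100);   // device rate; the source resamples to it if needed
    source.setSystemPlaybackBlockSize(1024);     // frames requested per callback
    source.setSystemPlaybackChannelCount(source.getApplicationChannelCount());
    source.setSystemPlaybackLatency(2 * 1024);   // frames at the device rate
}

// Hypothetical realtime callback: one non-interleaved buffer per channel,
// each with room for "frames" samples. getSourceSamples() zero-fills and
// returns 0 when nothing is playing, so it is safe to call unconditionally.
void audioCallback(AudioCallbackPlaySource &source,
                   float *const *channels, int nchannels, int frames)
{
    source.getSourceSamples(channels, nchannels, frames);
}
```

On the application side, playback is then driven through play(startFrame), setTimeStretch(factor) and stop(), while the class's fill thread keeps the per-channel ring buffers topped up from the models.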
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioCallbackPlaySource.h Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,448 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam and QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#ifndef SV_AUDIO_CALLBACK_PLAY_SOURCE_H +#define SV_AUDIO_CALLBACK_PLAY_SOURCE_H + +#include "base/RingBuffer.h" +#include "base/AudioPlaySource.h" +#include "base/PropertyContainer.h" +#include "base/Scavenger.h" + +#include <bqaudioio/ApplicationPlaybackSource.h> + +#include <QObject> +#include <QMutex> +#include <QWaitCondition> + +#include "base/Thread.h" +#include "base/RealTime.h" + +#include <samplerate.h> + +#include <set> +#include <map> + +namespace RubberBand { + class RubberBandStretcher; +} + +namespace breakfastquay { + class ResamplerWrapper; +} + +class Model; +class ViewManagerBase; +class AudioGenerator; +class PlayParameters; +class RealTimePluginInstance; +class AudioCallbackPlayTarget; + +/** + * AudioCallbackPlaySource manages audio data supply to callback-based + * audio APIs such as JACK or CoreAudio. It maintains one ring buffer + * per channel, filled during playback by a non-realtime thread, and + * provides a method for a realtime thread to pick up the latest + * available sample data from these buffers. + */ +class AudioCallbackPlaySource : public QObject, + public AudioPlaySource, + public breakfastquay::ApplicationPlaybackSource +{ + Q_OBJECT + +public: + AudioCallbackPlaySource(ViewManagerBase *, QString clientName); + virtual ~AudioCallbackPlaySource(); + + /** + * Add a data model to be played from. The source can mix + * playback from a number of sources including dense and sparse + * models. The models must match in sample rate, but they don't + * have to have identical numbers of channels. + */ + virtual void addModel(Model *model); + + /** + * Remove a model. + */ + virtual void removeModel(Model *model); + + /** + * Remove all models. (Silence will ensue.) + */ + virtual void clearModels(); + + /** + * Start making data available in the ring buffers for playback, + * from the given frame. If playback is already under way, reseek + * to the given frame and continue. + */ + virtual void play(sv_frame_t startFrame) override; + + /** + * Stop playback and ensure that no more data is returned. + */ + virtual void stop() override; + + /** + * Return whether playback is currently supposed to be happening. + */ + virtual bool isPlaying() const override { return m_playing; } + + /** + * Return the frame number that is currently expected to be coming + * out of the speakers. (i.e. compensating for playback latency.) + */ + virtual sv_frame_t getCurrentPlayingFrame() override; + + /** + * Return the last frame that would come out of the speakers if we + * stopped playback right now. + */ + virtual sv_frame_t getCurrentBufferedFrame(); + + /** + * Return the frame at which playback is expected to end (if not looping). + */ + virtual sv_frame_t getPlayEndFrame() { return m_lastModelEndFrame; } + + /** + * Set the playback target. 
+ */ + virtual void setSystemPlaybackTarget(breakfastquay::SystemPlaybackTarget *); + + /** + * Set the resampler wrapper, if one is in use. + */ + virtual void setResamplerWrapper(breakfastquay::ResamplerWrapper *); + + /** + * Set the block size of the target audio device. This should be + * called by the target class. + */ + virtual void setSystemPlaybackBlockSize(int blockSize) override; + + /** + * Get the block size of the target audio device. This may be an + * estimate or upper bound, if the target has a variable block + * size; the source should behave itself even if this value turns + * out to be inaccurate. + */ + virtual int getTargetBlockSize() const override; + + /** + * Set the playback latency of the target audio device, in frames + * at the device sample rate. This is the difference between the + * frame currently "leaving the speakers" and the last frame (or + * highest last frame across all channels) requested via + * getSamples(). The default is zero. + */ + virtual void setSystemPlaybackLatency(int) override; + + /** + * Get the playback latency of the target audio device. + */ + sv_frame_t getTargetPlayLatency() const; + + /** + * Specify that the target audio device has a fixed sample rate + * (i.e. cannot accommodate arbitrary sample rates based on the + * source). If the target sets this to something other than the + * source sample rate, this class will resample automatically to + * fit. + */ + virtual void setSystemPlaybackSampleRate(int) override; + + /** + * Return the sample rate set by the target audio device (or the + * source sample rate if the target hasn't set one). + */ + virtual sv_samplerate_t getDeviceSampleRate() const override; + + /** + * Indicate how many channels the target audio device was opened + * with. Note that the target device does channel mixing in the + * case where our requested channel count does not match its, so + * long as we provide the number of channels we specified when the + * target was started in getApplicationChannelCount(). + */ + virtual void setSystemPlaybackChannelCount(int) override; + + /** + * Set the current output levels for metering (for call from the + * target) + */ + virtual void setOutputLevels(float left, float right) override; + + /** + * Return the current output levels in the range 0.0 -> 1.0, for + * metering purposes. The values returned are the peak values + * since the last time this function was called (after which they + * are reset to zero until setOutputLevels is called again by the + * driver). + * + * Return true if the values have been set since this function was + * last called (i.e. if they are meaningful). Return false if they + * have not been set (in which case both will be zero). + */ + virtual bool getOutputLevels(float &left, float &right) override; + + /** + * Get the number of channels of audio that in the source models. + * This may safely be called from a realtime thread. Returns 0 if + * there is no source yet available. + */ + int getSourceChannelCount() const; + + /** + * Get the number of channels of audio that will be provided + * to the play target. This may be more than the source channel + * count: for example, a mono source will provide 2 channels + * after pan. + * + * This may safely be called from a realtime thread. Returns 0 if + * there is no source yet available. + * + * override from AudioPlaySource + */ + virtual int getTargetChannelCount() const override; + + /** + * Get the number of channels of audio the device is + * expecting. 
Equal to whatever getTargetChannelCount() was + * returning at the time the device was initialised. + */ + int getDeviceChannelCount() const; + + /** + * ApplicationPlaybackSource equivalent of the above. + * + * override from breakfastquay::ApplicationPlaybackSource + */ + virtual int getApplicationChannelCount() const override { + return getTargetChannelCount(); + } + + /** + * Get the actual sample rate of the source material (the main + * model). This may safely be called from a realtime thread. + * Returns 0 if there is no source yet available. + * + * When this changes, the AudioCallbackPlaySource notifies its + * ResamplerWrapper of the new sample rate so that it can resample + * correctly on the way to the device (which is opened at a fixed + * rate, see getApplicationSampleRate). + */ + virtual sv_samplerate_t getSourceSampleRate() const override; + + /** + * ApplicationPlaybackSource interface method: get the sample rate + * at which the application wants the device to be opened. We + * always allow the device to open at its default rate, and then + * we resample if the audio is at a different rate. This avoids + * having to close and re-open the device to obtain consistent + * behaviour for consecutive sessions with different source rates. + */ + virtual int getApplicationSampleRate() const override { + return 0; + } + + /** + * Get "count" samples (at the target sample rate) of the mixed + * audio data, in all channels. This may safely be called from a + * realtime thread. + */ + virtual int getSourceSamples(float *const *buffer, int nchannels, int count) override; + + /** + * Set the time stretcher factor (i.e. playback speed). + */ + void setTimeStretch(double factor); + + /** + * Set a single real-time plugin as a processing effect for + * auditioning during playback. + * + * The plugin must have been initialised with + * getTargetChannelCount() channels and a getTargetBlockSize() + * sample frame processing block size. + * + * This playback source takes ownership of the plugin, which will + * be deleted at some point after the following call to + * setAuditioningEffect (depending on real-time constraints). + * + * Pass a null pointer to remove the current auditioning plugin, + * if any. + */ + virtual void setAuditioningEffect(Auditionable *plugin) override; + + /** + * Specify that only the given set of models should be played. + */ + void setSoloModelSet(std::set<Model *>s); + + /** + * Specify that all models should be played as normal (if not + * muted). 
+ */ + void clearSoloModelSet(); + + virtual std::string getClientName() const override { + return m_clientName; + } + +signals: + void playStatusChanged(bool isPlaying); + + void sampleRateMismatch(sv_samplerate_t requested, + sv_samplerate_t available, + bool willResample); + + void channelCountIncreased(int count); // target channel count (see getTargetChannelCount()) + + void audioOverloadPluginDisabled(); + void audioTimeStretchMultiChannelDisabled(); + + void activity(QString); + +public slots: + void audioProcessingOverload() override; + +protected slots: + void selectionChanged(); + void playLoopModeChanged(); + void playSelectionModeChanged(); + void playParametersChanged(PlayParameters *); + void preferenceChanged(PropertyContainer::PropertyName); + void modelChangedWithin(sv_frame_t startFrame, sv_frame_t endFrame); + +protected: + ViewManagerBase *m_viewManager; + AudioGenerator *m_audioGenerator; + std::string m_clientName; + + class RingBufferVector : public std::vector<RingBuffer<float> *> { + public: + virtual ~RingBufferVector() { + while (!empty()) { + delete *begin(); + erase(begin()); + } + } + }; + + std::set<Model *> m_models; + RingBufferVector *m_readBuffers; + RingBufferVector *m_writeBuffers; + sv_frame_t m_readBufferFill; + sv_frame_t m_writeBufferFill; + Scavenger<RingBufferVector> m_bufferScavenger; + int m_sourceChannelCount; + sv_frame_t m_blockSize; + sv_samplerate_t m_sourceSampleRate; + sv_samplerate_t m_deviceSampleRate; + int m_deviceChannelCount; + sv_frame_t m_playLatency; + breakfastquay::SystemPlaybackTarget *m_target; + double m_lastRetrievalTimestamp; + sv_frame_t m_lastRetrievedBlockSize; + bool m_trustworthyTimestamps; + sv_frame_t m_lastCurrentFrame; + bool m_playing; + bool m_exiting; + sv_frame_t m_lastModelEndFrame; + int m_ringBufferSize; + float m_outputLeft; + float m_outputRight; + bool m_levelsSet; + RealTimePluginInstance *m_auditioningPlugin; + bool m_auditioningPluginBypassed; + Scavenger<RealTimePluginInstance> m_pluginScavenger; + sv_frame_t m_playStartFrame; + bool m_playStartFramePassed; + RealTime m_playStartedAt; + + RingBuffer<float> *getWriteRingBuffer(int c) { + if (m_writeBuffers && c < (int)m_writeBuffers->size()) { + return (*m_writeBuffers)[c]; + } else { + return 0; + } + } + + RingBuffer<float> *getReadRingBuffer(int c) { + RingBufferVector *rb = m_readBuffers; + if (rb && c < (int)rb->size()) { + return (*rb)[c]; + } else { + return 0; + } + } + + void clearRingBuffers(bool haveLock = false, int count = 0); + void unifyRingBuffers(); + + RubberBand::RubberBandStretcher *m_timeStretcher; + RubberBand::RubberBandStretcher *m_monoStretcher; + double m_stretchRatio; + bool m_stretchMono; + + int m_stretcherInputCount; + float **m_stretcherInputs; + sv_frame_t *m_stretcherInputSizes; + + // Called from fill thread, m_playing true, mutex held + // Return true if work done + bool fillBuffers(); + + // Called from fillBuffers. Return the number of frames written, + // which will be count or fewer. Return in the frame argument the + // new buffered frame position (which may be earlier than the + // frame argument passed in, in the case of looping). + sv_frame_t mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers); + + // Called from getSourceSamples. 
+ void applyAuditioningEffect(sv_frame_t count, float *const *buffers); + + // Ranges of current selections, if play selection is active + std::vector<RealTime> m_rangeStarts; + std::vector<RealTime> m_rangeDurations; + void rebuildRangeLists(); + + sv_frame_t getCurrentFrame(RealTime outputLatency); + + class FillThread : public Thread + { + public: + FillThread(AudioCallbackPlaySource &source) : + Thread(Thread::NonRTThread), + m_source(source) { } + + virtual void run(); + + protected: + AudioCallbackPlaySource &m_source; + }; + + QMutex m_mutex; + QWaitCondition m_condition; + FillThread *m_fillThread; + breakfastquay::ResamplerWrapper *m_resamplerWrapper; // I don't own this +}; + +#endif + +
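The class above implements breakfastquay::ApplicationPlaybackSource, so the realtime driver pulls mixed audio through getSourceSamples() while the internal FillThread keeps the per-channel ring buffers topped up. The sketch below illustrates the pull side of that contract only; the callback function and the silence-padding policy are illustrative, not part of this changeset.

    // Hypothetical callback driver illustrating the pull contract of
    // AudioCallbackPlaySource (a sketch, not part of this changeset).
    void audioCallback(AudioCallbackPlaySource *source,
                       float *const *deviceBuffers, // one array per output channel
                       int deviceChannels,
                       int nframes)
    {
        // The source mixes into as many channels as getTargetChannelCount()
        // reports; the device layer handles any mismatch with the hardware.
        int got = source->getSourceSamples(deviceBuffers, deviceChannels, nframes);

        // Defensively pad any shortfall with silence (the source may already
        // have done so).
        for (int c = 0; c < deviceChannels; ++c) {
            for (int i = got; i < nframes; ++i) {
                deviceBuffers[c][i] = 0.f;
            }
        }
    }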
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioCallbackRecordTarget.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,322 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "AudioCallbackRecordTarget.h" + +#include "base/ViewManagerBase.h" +#include "base/TempDirectory.h" + +#include "data/model/WritableWaveFileModel.h" + +#include <QDir> +#include <QTimer> + +AudioCallbackRecordTarget::AudioCallbackRecordTarget(ViewManagerBase *manager, + QString clientName) : + m_viewManager(manager), + m_clientName(clientName.toUtf8().data()), + m_recording(false), + m_recordSampleRate(44100), + m_recordChannelCount(2), + m_frameCount(0), + m_model(0), + m_buffers(0), + m_bufferCount(0), + m_inputLeft(0.f), + m_inputRight(0.f), + m_levelsSet(false) +{ + m_viewManager->setAudioRecordTarget(this); + + connect(this, SIGNAL(recordStatusChanged(bool)), + m_viewManager, SLOT(recordStatusChanged(bool))); + + recreateBuffers(); +} + +AudioCallbackRecordTarget::~AudioCallbackRecordTarget() +{ + m_viewManager->setAudioRecordTarget(0); + + QMutexLocker locker(&m_bufPtrMutex); + for (int c = 0; c < m_bufferCount; ++c) { + delete m_buffers[c]; + } + delete[] m_buffers; +} + +void +AudioCallbackRecordTarget::recreateBuffers() +{ + static int bufferSize = 441000; + + int count = m_recordChannelCount; + + if (count > m_bufferCount) { + + RingBuffer<float> **newBuffers = new RingBuffer<float> *[count]; + for (int c = 0; c < m_bufferCount; ++c) { + newBuffers[c] = m_buffers[c]; + } + for (int c = m_bufferCount; c < count; ++c) { + newBuffers[c] = new RingBuffer<float>(bufferSize); + } + + // This is the only place where m_buffers is rewritten and + // should be the only possible source of contention against + // putSamples for this mutex (as the model-updating code is + // supposed to run in the same thread as this) + QMutexLocker locker(&m_bufPtrMutex); + delete[] m_buffers; + m_buffers = newBuffers; + m_bufferCount = count; + } +} + +int +AudioCallbackRecordTarget::getApplicationSampleRate() const +{ + return 0; // don't care +} + +int +AudioCallbackRecordTarget::getApplicationChannelCount() const +{ + return m_recordChannelCount; +} + +void +AudioCallbackRecordTarget::setSystemRecordBlockSize(int) +{ +} + +void +AudioCallbackRecordTarget::setSystemRecordSampleRate(int n) +{ + m_recordSampleRate = n; +} + +void +AudioCallbackRecordTarget::setSystemRecordLatency(int) +{ +} + +void +AudioCallbackRecordTarget::setSystemRecordChannelCount(int c) +{ + m_recordChannelCount = c; + recreateBuffers(); +} + +void +AudioCallbackRecordTarget::putSamples(const float *const *samples, int, int nframes) +{ + // This may be called from RT context, and in a different thread + // from everything else in this class. 
It takes a mutex that + // should almost never be contended (see recreateBuffers()) + if (!m_recording) return; + + QMutexLocker locker(&m_bufPtrMutex); + if (m_buffers && m_bufferCount >= m_recordChannelCount) { + for (int c = 0; c < m_recordChannelCount; ++c) { + m_buffers[c]->write(samples[c], nframes); + } + } +} + +void +AudioCallbackRecordTarget::updateModel() +{ + bool secChanged = false; + sv_frame_t frameToEmit = 0; + + int nframes = 0; + for (int c = 0; c < m_recordChannelCount; ++c) { + if (c == 0 || m_buffers[c]->getReadSpace() < nframes) { + nframes = m_buffers[c]->getReadSpace(); + } + } + + if (nframes == 0) { + return; + } + + float **samples = new float *[m_recordChannelCount]; + for (int c = 0; c < m_recordChannelCount; ++c) { + samples[c] = new float[nframes]; + m_buffers[c]->read(samples[c], nframes); + } + + m_model->addSamples(samples, nframes); + + for (int c = 0; c < m_recordChannelCount; ++c) { + delete[] samples[c]; + } + delete[] samples; + + sv_frame_t priorFrameCount = m_frameCount; + m_frameCount += nframes; + + RealTime priorRT = + RealTime::frame2RealTime(priorFrameCount, m_recordSampleRate); + + RealTime postRT = + RealTime::frame2RealTime(m_frameCount, m_recordSampleRate); + + secChanged = (postRT.sec > priorRT.sec); + if (secChanged) { + m_model->updateModel(); + frameToEmit = m_frameCount; + } + + if (secChanged) { + emit recordDurationChanged(frameToEmit, m_recordSampleRate); + } + + if (m_recording) { + QTimer::singleShot(1000, this, SLOT(updateModel())); + } +} + +void +AudioCallbackRecordTarget::setInputLevels(float left, float right) +{ + if (left > m_inputLeft) m_inputLeft = left; + if (right > m_inputRight) m_inputRight = right; + m_levelsSet = true; +} + +bool +AudioCallbackRecordTarget::getInputLevels(float &left, float &right) +{ + left = m_inputLeft; + right = m_inputRight; + bool valid = m_levelsSet; + m_inputLeft = 0.f; + m_inputRight = 0.f; + m_levelsSet = false; + return valid; +} + +void +AudioCallbackRecordTarget::modelAboutToBeDeleted() +{ + if (sender() == m_model) { + m_model = 0; + m_recording = false; + } +} + +QString +AudioCallbackRecordTarget::getRecordContainerFolder() +{ + QDir parent(TempDirectory::getInstance()->getContainingPath()); + QString subdirname("recorded"); + + if (!parent.mkpath(subdirname)) { + SVCERR << "ERROR: AudioCallbackRecordTarget::getRecordContainerFolder: Failed to create recorded dir in \"" << parent.canonicalPath() << "\"" << endl; + return ""; + } else { + return parent.filePath(subdirname); + } +} + +QString +AudioCallbackRecordTarget::getRecordFolder() +{ + QDir parent(getRecordContainerFolder()); + QDateTime now = QDateTime::currentDateTime(); + QString subdirname = QString("%1").arg(now.toString("yyyyMMdd")); + + if (!parent.mkpath(subdirname)) { + SVCERR << "ERROR: AudioCallbackRecordTarget::getRecordFolder: Failed to create recorded dir in \"" << parent.canonicalPath() << "\"" << endl; + return ""; + } else { + return parent.filePath(subdirname); + } +} + +WritableWaveFileModel * +AudioCallbackRecordTarget::startRecording() +{ + if (m_recording) { + SVCERR << "WARNING: AudioCallbackRecordTarget::startRecording: We are already recording" << endl; + return 0; + } + + m_model = 0; + m_frameCount = 0; + + QString folder = getRecordFolder(); + if (folder == "") return 0; + QDir recordedDir(folder); + + QDateTime now = QDateTime::currentDateTime(); + + // Don't use QDateTime::toString(Qt::ISODate) as the ":" character + // isn't permitted in filenames on Windows + QString nowString = 
now.toString("yyyyMMdd-HHmmss-zzz"); + + QString filename = tr("recorded-%1.wav").arg(nowString); + QString label = tr("Recorded %1").arg(nowString); + + m_audioFileName = recordedDir.filePath(filename); + + m_model = new WritableWaveFileModel(m_recordSampleRate, + m_recordChannelCount, + m_audioFileName); + + if (!m_model->isOK()) { + SVCERR << "ERROR: AudioCallbackRecordTarget::startRecording: Recording failed" + << endl; + //!!! and throw? + delete m_model; + m_model = 0; + return 0; + } + + m_model->setObjectName(label); + m_recording = true; + + emit recordStatusChanged(true); + + QTimer::singleShot(1000, this, SLOT(updateModel())); + + return m_model; +} + +void +AudioCallbackRecordTarget::stopRecording() +{ + if (!m_recording) { + SVCERR << "WARNING: AudioCallbackRecordTarget::startRecording: Not recording" << endl; + return; + } + + m_recording = false; + + m_bufPtrMutex.lock(); + m_bufPtrMutex.unlock(); + + // buffers should now be up-to-date + updateModel(); + + m_model->writeComplete(); + m_model = 0; + + emit recordStatusChanged(false); + emit recordCompleted(); +} + +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioCallbackRecordTarget.h Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,110 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#ifndef SV_AUDIO_CALLBACK_RECORD_TARGET_H +#define SV_AUDIO_CALLBACK_RECORD_TARGET_H + +#include "base/AudioRecordTarget.h" + +#include <bqaudioio/ApplicationRecordTarget.h> + +#include <string> +#include <atomic> + +#include <QObject> +#include <QMutex> + +#include "base/BaseTypes.h" +#include "base/RingBuffer.h" + +class ViewManagerBase; +class WritableWaveFileModel; + +class AudioCallbackRecordTarget : public QObject, + public AudioRecordTarget, + public breakfastquay::ApplicationRecordTarget +{ + Q_OBJECT + +public: + AudioCallbackRecordTarget(ViewManagerBase *, QString clientName); + virtual ~AudioCallbackRecordTarget(); + + virtual std::string getClientName() const override { return m_clientName; } + + virtual int getApplicationSampleRate() const override; + virtual int getApplicationChannelCount() const override; + + virtual void setSystemRecordBlockSize(int) override; + virtual void setSystemRecordSampleRate(int) override; + virtual void setSystemRecordLatency(int) override; + virtual void setSystemRecordChannelCount(int) override; + + virtual void putSamples(const float *const *samples, int nchannels, int nframes) override; + + virtual void setInputLevels(float peakLeft, float peakRight) override; + + virtual void audioProcessingOverload() override { } + + QString getRecordContainerFolder(); + QString getRecordFolder(); + + virtual bool isRecording() const override { return m_recording; } + virtual sv_frame_t getRecordDuration() const override { return m_frameCount; } + + /** + * Return the current input levels in the range 0.0 -> 1.0, for + * metering purposes. The values returned are the peak values + * since the last time this function was called (after which they + * are reset to zero until setInputLevels is called again by the + * driver). + * + * Return true if the values have been set since this function was + * last called (i.e. if they are meaningful). Return false if they + * have not been set (in which case both will be zero). + */ + virtual bool getInputLevels(float &left, float &right) override; + + WritableWaveFileModel *startRecording(); // caller takes ownership of model + void stopRecording(); + +signals: + void recordStatusChanged(bool recording); + void recordDurationChanged(sv_frame_t, sv_samplerate_t); // emitted occasionally + void recordCompleted(); + +protected slots: + void modelAboutToBeDeleted(); + void updateModel(); + +private: + ViewManagerBase *m_viewManager; + std::string m_clientName; + std::atomic_bool m_recording; + sv_samplerate_t m_recordSampleRate; + int m_recordChannelCount; + sv_frame_t m_frameCount; + QString m_audioFileName; + WritableWaveFileModel *m_model; + RingBuffer<float> **m_buffers; + QMutex m_bufPtrMutex; + int m_bufferCount; + float m_inputLeft; + float m_inputRight; + bool m_levelsSet; + + void recreateBuffers(); +}; + +#endif
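From the application side the class above is used in two calls: startRecording() hands back a WritableWaveFileModel that the caller owns, and stopRecording() drains whatever is still buffered and finalises the file. A minimal sketch, assuming the record target pointer already exists (the surrounding function is mine):

    // Sketch of a recording session from the application side (error handling trimmed).
    void recordSomething(AudioCallbackRecordTarget *recordTarget)
    {
        WritableWaveFileModel *model = recordTarget->startRecording();
        if (!model) return;

        // The caller takes ownership of the model; typically it is added to the
        // document here so the incoming audio is visible while recording.

        // ... later, when the user stops:
        recordTarget->stopRecording();   // drains buffers, finalises the wav file
    }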
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioGenerator.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,710 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "AudioGenerator.h" + +#include "base/TempDirectory.h" +#include "base/PlayParameters.h" +#include "base/PlayParameterRepository.h" +#include "base/Pitch.h" +#include "base/Exceptions.h" + +#include "data/model/NoteModel.h" +#include "data/model/FlexiNoteModel.h" +#include "data/model/DenseTimeValueModel.h" +#include "data/model/SparseTimeValueModel.h" +#include "data/model/SparseOneDimensionalModel.h" +#include "data/model/NoteData.h" + +#include "ClipMixer.h" +#include "ContinuousSynth.h" + +#include <iostream> +#include <cmath> + +#include <QDir> +#include <QFile> + +const sv_frame_t +AudioGenerator::m_processingBlockSize = 1024; + +QString +AudioGenerator::m_sampleDir = ""; + +//#define DEBUG_AUDIO_GENERATOR 1 + +AudioGenerator::AudioGenerator() : + m_sourceSampleRate(0), + m_targetChannelCount(1), + m_waveType(0), + m_soloing(false), + m_channelBuffer(0), + m_channelBufSiz(0), + m_channelBufCount(0) +{ + initialiseSampleDir(); + + connect(PlayParameterRepository::getInstance(), + SIGNAL(playClipIdChanged(const Playable *, QString)), + this, + SLOT(playClipIdChanged(const Playable *, QString))); +} + +AudioGenerator::~AudioGenerator() +{ +#ifdef DEBUG_AUDIO_GENERATOR + SVDEBUG << "AudioGenerator::~AudioGenerator" << endl; +#endif +} + +void +AudioGenerator::initialiseSampleDir() +{ + if (m_sampleDir != "") return; + + try { + m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples"); + } catch (DirectoryCreationFailed f) { + cerr << "WARNING: AudioGenerator::initialiseSampleDir:" + << " Failed to create temporary sample directory" + << endl; + m_sampleDir = ""; + return; + } + + QDir sampleResourceDir(":/samples", "*.wav"); + + for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) { + + QString fileName(sampleResourceDir[i]); + QFile file(sampleResourceDir.filePath(fileName)); + QString target = QDir(m_sampleDir).filePath(fileName); + + if (!file.copy(target)) { + cerr << "WARNING: AudioGenerator::getSampleDir: " + << "Unable to copy " << fileName + << " into temporary directory \"" + << m_sampleDir << "\"" << endl; + } else { + QFile tf(target); + tf.setPermissions(tf.permissions() | + QFile::WriteOwner | + QFile::WriteUser); + } + } +} + +bool +AudioGenerator::addModel(Model *model) +{ + if (m_sourceSampleRate == 0) { + + m_sourceSampleRate = model->getSampleRate(); + + } else { + + DenseTimeValueModel *dtvm = + dynamic_cast<DenseTimeValueModel *>(model); + + if (dtvm) { + m_sourceSampleRate = model->getSampleRate(); + return true; + } + } + + const Playable *playable = model; + if (!playable || !playable->canPlay()) return 0; + + PlayParameters *parameters = + PlayParameterRepository::getInstance()->getPlayParameters(playable); + + bool willPlay = !parameters->isPlayMuted(); + + if (usesClipMixer(model)) { + ClipMixer *mixer = makeClipMixerFor(model); + 
if (mixer) { + QMutexLocker locker(&m_mutex); + m_clipMixerMap[model] = mixer; + return willPlay; + } + } + + if (usesContinuousSynth(model)) { + ContinuousSynth *synth = makeSynthFor(model); + if (synth) { + QMutexLocker locker(&m_mutex); + m_continuousSynthMap[model] = synth; + return willPlay; + } + } + + return false; +} + +void +AudioGenerator::playClipIdChanged(const Playable *playable, QString) +{ + const Model *model = dynamic_cast<const Model *>(playable); + if (!model) { + cerr << "WARNING: AudioGenerator::playClipIdChanged: playable " + << playable << " is not a supported model type" + << endl; + return; + } + + if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return; + + ClipMixer *mixer = makeClipMixerFor(model); + if (mixer) { + QMutexLocker locker(&m_mutex); + m_clipMixerMap[model] = mixer; + } +} + +bool +AudioGenerator::usesClipMixer(const Model *model) +{ + bool clip = + (qobject_cast<const SparseOneDimensionalModel *>(model) || + qobject_cast<const NoteModel *>(model) || + qobject_cast<const FlexiNoteModel *>(model)); + return clip; +} + +bool +AudioGenerator::wantsQuieterClips(const Model *model) +{ + // basically, anything that usually has sustain (like notes) or + // often has multiple sounds at once (like notes) wants to use a + // quieter level than simple click tracks + bool does = + (qobject_cast<const NoteModel *>(model) || + qobject_cast<const FlexiNoteModel *>(model)); + return does; +} + +bool +AudioGenerator::usesContinuousSynth(const Model *model) +{ + bool cont = + (qobject_cast<const SparseTimeValueModel *>(model)); + return cont; +} + +ClipMixer * +AudioGenerator::makeClipMixerFor(const Model *model) +{ + QString clipId; + + const Playable *playable = model; + if (!playable || !playable->canPlay()) return 0; + + PlayParameters *parameters = + PlayParameterRepository::getInstance()->getPlayParameters(playable); + if (parameters) { + clipId = parameters->getPlayClipId(); + } + +#ifdef DEBUG_AUDIO_GENERATOR + std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl; +#endif + + if (clipId == "") { + SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl; + return 0; + } + + ClipMixer *mixer = new ClipMixer(m_targetChannelCount, + m_sourceSampleRate, + m_processingBlockSize); + + double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required + + QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); + + double level = wantsQuieterClips(model) ? 
0.5 : 1.0; + if (!mixer->loadClipData(clipPath, clipF0, level)) { + delete mixer; + return 0; + } + +#ifdef DEBUG_AUDIO_GENERATOR + std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl; +#endif + + return mixer; +} + +ContinuousSynth * +AudioGenerator::makeSynthFor(const Model *model) +{ + const Playable *playable = model; + if (!playable || !playable->canPlay()) return 0; + + ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount, + m_sourceSampleRate, + m_processingBlockSize, + m_waveType); + +#ifdef DEBUG_AUDIO_GENERATOR + std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl; +#endif + + return synth; +} + +void +AudioGenerator::removeModel(Model *model) +{ + SparseOneDimensionalModel *sodm = + dynamic_cast<SparseOneDimensionalModel *>(model); + if (!sodm) return; // nothing to do + + QMutexLocker locker(&m_mutex); + + if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return; + + ClipMixer *mixer = m_clipMixerMap[sodm]; + m_clipMixerMap.erase(sodm); + delete mixer; +} + +void +AudioGenerator::clearModels() +{ + QMutexLocker locker(&m_mutex); + + while (!m_clipMixerMap.empty()) { + ClipMixer *mixer = m_clipMixerMap.begin()->second; + m_clipMixerMap.erase(m_clipMixerMap.begin()); + delete mixer; + } +} + +void +AudioGenerator::reset() +{ + QMutexLocker locker(&m_mutex); + +#ifdef DEBUG_AUDIO_GENERATOR + cerr << "AudioGenerator::reset()" << endl; +#endif + + for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { + if (i->second) { + i->second->reset(); + } + } + + m_noteOffs.clear(); +} + +void +AudioGenerator::setTargetChannelCount(int targetChannelCount) +{ + if (m_targetChannelCount == targetChannelCount) return; + +// SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl; + + QMutexLocker locker(&m_mutex); + m_targetChannelCount = targetChannelCount; + + for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { + if (i->second) i->second->setChannelCount(targetChannelCount); + } +} + +sv_frame_t +AudioGenerator::getBlockSize() const +{ + return m_processingBlockSize; +} + +void +AudioGenerator::setSoloModelSet(std::set<Model *> s) +{ + QMutexLocker locker(&m_mutex); + + m_soloModelSet = s; + m_soloing = true; +} + +void +AudioGenerator::clearSoloModelSet() +{ + QMutexLocker locker(&m_mutex); + + m_soloModelSet.clear(); + m_soloing = false; +} + +sv_frame_t +AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, + float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut) +{ + if (m_sourceSampleRate == 0) { + cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; + return frameCount; + } + + QMutexLocker locker(&m_mutex); + + Playable *playable = model; + if (!playable || !playable->canPlay()) return frameCount; + + PlayParameters *parameters = + PlayParameterRepository::getInstance()->getPlayParameters(playable); + if (!parameters) return frameCount; + + bool playing = !parameters->isPlayMuted(); + if (!playing) { +#ifdef DEBUG_AUDIO_GENERATOR + cout << "AudioGenerator::mixModel(" << model << "): muted" << endl; +#endif + return frameCount; + } + + if (m_soloing) { + if (m_soloModelSet.find(model) == m_soloModelSet.end()) { +#ifdef DEBUG_AUDIO_GENERATOR + cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl; +#endif + return frameCount; + } + } + + float gain = parameters->getPlayGain(); + 
float pan = parameters->getPlayPan(); + + DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model); + if (dtvm) { + return mixDenseTimeValueModel(dtvm, startFrame, frameCount, + buffer, gain, pan, fadeIn, fadeOut); + } + + if (usesClipMixer(model)) { + return mixClipModel(model, startFrame, frameCount, + buffer, gain, pan); + } + + if (usesContinuousSynth(model)) { + return mixContinuousSynthModel(model, startFrame, frameCount, + buffer, gain, pan); + } + + std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; + + return frameCount; +} + +sv_frame_t +AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, + sv_frame_t startFrame, sv_frame_t frames, + float **buffer, float gain, float pan, + sv_frame_t fadeIn, sv_frame_t fadeOut) +{ + sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut); + + int modelChannels = dtvm->getChannelCount(); + + if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { + + for (int c = 0; c < m_channelBufCount; ++c) { + delete[] m_channelBuffer[c]; + } + + delete[] m_channelBuffer; + m_channelBuffer = new float *[modelChannels]; + + for (int c = 0; c < modelChannels; ++c) { + m_channelBuffer[c] = new float[maxFrames]; + } + + m_channelBufCount = modelChannels; + m_channelBufSiz = maxFrames; + } + + sv_frame_t got = 0; + + if (startFrame >= fadeIn/2) { + + auto data = dtvm->getMultiChannelData(0, modelChannels - 1, + startFrame - fadeIn/2, + frames + fadeOut/2 + fadeIn/2); + + for (int c = 0; c < modelChannels; ++c) { + copy(data[c].begin(), data[c].end(), m_channelBuffer[c]); + } + + got = data[0].size(); + + } else { + sv_frame_t missing = fadeIn/2 - startFrame; + + if (missing > 0) { + cerr << "note: channelBufSiz = " << m_channelBufSiz + << ", frames + fadeOut/2 = " << frames + fadeOut/2 + << ", startFrame = " << startFrame + << ", missing = " << missing << endl; + } + + auto data = dtvm->getMultiChannelData(0, modelChannels - 1, + startFrame, + frames + fadeOut/2); + for (int c = 0; c < modelChannels; ++c) { + copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing); + } + + got = data[0].size() + missing; + } + + for (int c = 0; c < m_targetChannelCount; ++c) { + + int sourceChannel = (c % modelChannels); + +// SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; + + float channelGain = gain; + if (pan != 0.0) { + if (c == 0) { + if (pan > 0.0) channelGain *= 1.0f - pan; + } else { + if (pan < 0.0) channelGain *= pan + 1.0f; + } + } + + for (sv_frame_t i = 0; i < fadeIn/2; ++i) { + float *back = buffer[c]; + back -= fadeIn/2; + back[i] += + (channelGain * m_channelBuffer[sourceChannel][i] * float(i)) + / float(fadeIn); + } + + for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) { + float mult = channelGain; + if (i < fadeIn/2) { + mult = (mult * float(i)) / float(fadeIn); + } + if (i > frames - fadeOut/2) { + mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut); + } + float val = m_channelBuffer[sourceChannel][i]; + if (i >= got) val = 0.f; + buffer[c][i] += mult * val; + } + } + + return got; +} + +sv_frame_t +AudioGenerator::mixClipModel(Model *model, + sv_frame_t startFrame, sv_frame_t frames, + float **buffer, float gain, float pan) +{ + ClipMixer *clipMixer = m_clipMixerMap[model]; + if (!clipMixer) return 0; + + int blocks = int(frames / m_processingBlockSize); + + //!!! todo: the below -- it matters + + //!!! 
hang on -- the fact that the audio callback play source's + //buffer is a multiple of the plugin's buffer size doesn't mean + //that we always get called for a multiple of it here (because it + //also depends on the JACK block size). how should we ensure that + //all models write the same amount in to the mix, and that we + //always have a multiple of the plugin buffer size? I guess this + //class has to be queryable for the plugin buffer size & the + //callback play source has to use that as a multiple for all the + //calls to mixModel + + sv_frame_t got = blocks * m_processingBlockSize; + +#ifdef DEBUG_AUDIO_GENERATOR + cout << "mixModel [clip]: start " << startFrame << ", frames " << frames + << ", blocks " << blocks << ", have " << m_noteOffs.size() + << " note-offs" << endl; +#endif + + ClipMixer::NoteStart on; + ClipMixer::NoteEnd off; + + NoteOffSet &noteOffs = m_noteOffs[model]; + + float **bufferIndexes = new float *[m_targetChannelCount]; + + for (int i = 0; i < blocks; ++i) { + + sv_frame_t reqStart = startFrame + i * m_processingBlockSize; + + NoteList notes; + NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); + if (exportable) { + notes = exportable->getNotesWithin(reqStart, + reqStart + m_processingBlockSize); + } + + std::vector<ClipMixer::NoteStart> starts; + std::vector<ClipMixer::NoteEnd> ends; + + for (NoteList::const_iterator ni = notes.begin(); + ni != notes.end(); ++ni) { + + sv_frame_t noteFrame = ni->start; + + if (noteFrame < reqStart || + noteFrame >= reqStart + m_processingBlockSize) continue; + + while (noteOffs.begin() != noteOffs.end() && + noteOffs.begin()->frame <= noteFrame) { + + sv_frame_t eventFrame = noteOffs.begin()->frame; + if (eventFrame < reqStart) eventFrame = reqStart; + + off.frameOffset = eventFrame - reqStart; + off.frequency = noteOffs.begin()->frequency; + +#ifdef DEBUG_AUDIO_GENERATOR + cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl; +#endif + + ends.push_back(off); + noteOffs.erase(noteOffs.begin()); + } + + on.frameOffset = noteFrame - reqStart; + on.frequency = ni->getFrequency(); + on.level = float(ni->velocity) / 127.0f; + on.pan = pan; + +#ifdef DEBUG_AUDIO_GENERATOR + cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; +#endif + + starts.push_back(on); + noteOffs.insert + (NoteOff(on.frequency, noteFrame + ni->duration)); + } + + while (noteOffs.begin() != noteOffs.end() && + noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { + + sv_frame_t eventFrame = noteOffs.begin()->frame; + if (eventFrame < reqStart) eventFrame = reqStart; + + off.frameOffset = eventFrame - reqStart; + off.frequency = noteOffs.begin()->frequency; + +#ifdef DEBUG_AUDIO_GENERATOR + cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl; +#endif + + ends.push_back(off); + noteOffs.erase(noteOffs.begin()); + } + + for (int c = 0; c < m_targetChannelCount; ++c) { + bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; + } + + clipMixer->mix(bufferIndexes, gain, starts, ends); + } + + delete[] bufferIndexes; + + return got; +} + +sv_frame_t +AudioGenerator::mixContinuousSynthModel(Model *model, + sv_frame_t startFrame, + sv_frame_t frames, + float **buffer, + float gain, + float pan) +{ + ContinuousSynth *synth = 
m_continuousSynthMap[model]; + if (!synth) return 0; + + // only type we support here at the moment + SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); + if (stvm->getScaleUnits() != "Hz") return 0; + + int blocks = int(frames / m_processingBlockSize); + + //!!! todo: see comment in mixClipModel + + sv_frame_t got = blocks * m_processingBlockSize; + +#ifdef DEBUG_AUDIO_GENERATOR + cout << "mixModel [synth]: frames " << frames + << ", blocks " << blocks << endl; +#endif + + float **bufferIndexes = new float *[m_targetChannelCount]; + + for (int i = 0; i < blocks; ++i) { + + sv_frame_t reqStart = startFrame + i * m_processingBlockSize; + + for (int c = 0; c < m_targetChannelCount; ++c) { + bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; + } + + SparseTimeValueModel::PointList points = + stvm->getPoints(reqStart, reqStart + m_processingBlockSize); + + // by default, repeat last frequency + float f0 = 0.f; + + // go straight to the last freq that is genuinely in this range + for (SparseTimeValueModel::PointList::const_iterator itr = points.end(); + itr != points.begin(); ) { + --itr; + if (itr->frame >= reqStart && + itr->frame < reqStart + m_processingBlockSize) { + f0 = itr->value; + break; + } + } + + // if we found no such frequency and the next point is further + // away than twice the model resolution, go silent (same + // criterion TimeValueLayer uses for ending a discrete curve + // segment) + if (f0 == 0.f) { + SparseTimeValueModel::PointList nextPoints = + stvm->getNextPoints(reqStart + m_processingBlockSize); + if (nextPoints.empty() || + nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) { + f0 = -1.f; + } + } + +// cerr << "f0 = " << f0 << endl; + + synth->mix(bufferIndexes, + gain, + pan, + f0); + } + + delete[] bufferIndexes; + + return got; +} +
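The dense-model mixing path above applies a simple linear pan rule for a stereo target: pan runs over [-1, 1], only the channel opposite the pan direction is attenuated, and the centre position leaves both channels at full gain. (The clip and synth paths use a slightly different variant that also boosts the favoured channel.) A standalone restatement for reference; the helper name is mine, not part of the changeset:

    // Sketch of the channel gain rule used in mixDenseTimeValueModel() above
    // for a stereo target (helper name is hypothetical). pan is in [-1, 1].
    void denseModelChannelGains(float gain, float pan,
                                float &leftGain, float &rightGain)
    {
        leftGain = gain;
        rightGain = gain;
        if (pan > 0.f) leftGain *= 1.f - pan;    // panned right: attenuate left
        if (pan < 0.f) rightGain *= pan + 1.f;   // panned left: attenuate right
    }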
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/AudioGenerator.h Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,168 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#ifndef _AUDIO_GENERATOR_H_ +#define _AUDIO_GENERATOR_H_ + +class Model; +class NoteModel; +class FlexiNoteModel; +class DenseTimeValueModel; +class SparseOneDimensionalModel; +class Playable; +class ClipMixer; +class ContinuousSynth; + +#include <QObject> +#include <QMutex> + +#include <set> +#include <map> +#include <vector> + +#include "base/BaseTypes.h" + +class AudioGenerator : public QObject +{ + Q_OBJECT + +public: + AudioGenerator(); + virtual ~AudioGenerator(); + + /** + * Add a data model to be played from and initialise any necessary + * audio generation code. Returns true if the model will be + * played. The model will be added regardless of the return + * value. + */ + virtual bool addModel(Model *model); + + /** + * Remove a model. + */ + virtual void removeModel(Model *model); + + /** + * Remove all models. + */ + virtual void clearModels(); + + /** + * Reset playback, clearing buffers and the like. + */ + virtual void reset(); + + /** + * Set the target channel count. The buffer parameter to mixModel + * must always point to at least this number of arrays. + */ + virtual void setTargetChannelCount(int channelCount); + + /** + * Return the internal processing block size. The frameCount + * argument to all mixModel calls must be a multiple of this + * value. + */ + virtual sv_frame_t getBlockSize() const; + + /** + * Mix a single model into an output buffer. + */ + virtual sv_frame_t mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, + float **buffer, sv_frame_t fadeIn = 0, sv_frame_t fadeOut = 0); + + /** + * Specify that only the given set of models should be played. + */ + virtual void setSoloModelSet(std::set<Model *>s); + + /** + * Specify that all models should be played as normal (if not + * muted). 
+ */ + virtual void clearSoloModelSet(); + +protected slots: + void playClipIdChanged(const Playable *, QString); + +protected: + sv_samplerate_t m_sourceSampleRate; + int m_targetChannelCount; + int m_waveType; + + bool m_soloing; + std::set<Model *> m_soloModelSet; + + struct NoteOff { + + NoteOff(float _freq, sv_frame_t _frame) : frequency(_freq), frame(_frame) { } + + float frequency; + sv_frame_t frame; + + struct Comparator { + bool operator()(const NoteOff &n1, const NoteOff &n2) const { + return n1.frame < n2.frame; + } + }; + }; + + + typedef std::map<const Model *, ClipMixer *> ClipMixerMap; + + typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet; + typedef std::map<const Model *, NoteOffSet> NoteOffMap; + + typedef std::map<const Model *, ContinuousSynth *> ContinuousSynthMap; + + QMutex m_mutex; + + ClipMixerMap m_clipMixerMap; + NoteOffMap m_noteOffs; + static QString m_sampleDir; + + ContinuousSynthMap m_continuousSynthMap; + + bool usesClipMixer(const Model *); + bool wantsQuieterClips(const Model *); + bool usesContinuousSynth(const Model *); + + ClipMixer *makeClipMixerFor(const Model *model); + ContinuousSynth *makeSynthFor(const Model *model); + + static void initialiseSampleDir(); + + virtual sv_frame_t mixDenseTimeValueModel + (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount, + float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut); + + virtual sv_frame_t mixClipModel + (Model *model, sv_frame_t startFrame, sv_frame_t frameCount, + float **buffer, float gain, float pan); + + virtual sv_frame_t mixContinuousSynthModel + (Model *model, sv_frame_t startFrame, sv_frame_t frameCount, + float **buffer, float gain, float pan); + + static const sv_frame_t m_processingBlockSize; + + float **m_channelBuffer; + sv_frame_t m_channelBufSiz; + int m_channelBufCount; +}; + +#endif +
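As the comments in the header above require, the frameCount passed to mixModel() must be a multiple of getBlockSize(), and the buffer array must provide at least the configured target channel count. A minimal caller sketch under those assumptions; the function name, chunk size and containers are mine, and it assumes <vector> is included:

    // Sketch of one mixing pass over AudioGenerator (not part of the changeset).
    void mixOneChunk(AudioGenerator *generator,
                     const std::vector<Model *> &models,
                     sv_frame_t playbackFrame)
    {
        sv_frame_t block = generator->getBlockSize();
        sv_frame_t frames = block * 4;        // must be a multiple of the block size
        int channels = 2;
        generator->setTargetChannelCount(channels);

        std::vector<std::vector<float>> storage(channels,
                                                std::vector<float>(frames, 0.f));
        std::vector<float *> buffers(channels);
        for (int c = 0; c < channels; ++c) buffers[c] = storage[c].data();

        for (Model *m : models) {
            generator->mixModel(m, playbackFrame, frames, buffers.data());
        }
        // "buffers" now holds the sum of all playable, unmuted models.
    }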
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/ClipMixer.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,248 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam, 2006-2014 QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "ClipMixer.h" + +#include <sndfile.h> +#include <cmath> + +#include "base/Debug.h" + +//#define DEBUG_CLIP_MIXER 1 + +ClipMixer::ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize) : + m_channels(channels), + m_sampleRate(sampleRate), + m_blockSize(blockSize), + m_clipData(0), + m_clipLength(0), + m_clipF0(0), + m_clipRate(0) +{ +} + +ClipMixer::~ClipMixer() +{ + if (m_clipData) free(m_clipData); +} + +void +ClipMixer::setChannelCount(int channels) +{ + m_channels = channels; +} + +bool +ClipMixer::loadClipData(QString path, double f0, double level) +{ + if (m_clipData) { + cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl; + return false; + } + + SF_INFO info; + SNDFILE *file; + float *tmpFrames; + sv_frame_t i; + + info.format = 0; + file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info); + if (!file) { + cerr << "ClipMixer::loadClipData: Failed to open file path \"" + << path << "\": " << sf_strerror(file) << endl; + return false; + } + + tmpFrames = (float *)malloc(info.frames * info.channels * sizeof(float)); + if (!tmpFrames) { + cerr << "ClipMixer::loadClipData: malloc(" << info.frames * info.channels * sizeof(float) << ") failed" << endl; + return false; + } + + sf_readf_float(file, tmpFrames, info.frames); + sf_close(file); + + m_clipData = (float *)malloc(info.frames * sizeof(float)); + if (!m_clipData) { + cerr << "ClipMixer::loadClipData: malloc(" << info.frames * sizeof(float) << ") failed" << endl; + free(tmpFrames); + return false; + } + + for (i = 0; i < info.frames; ++i) { + int j; + m_clipData[i] = 0.0f; + for (j = 0; j < info.channels; ++j) { + m_clipData[i] += tmpFrames[i * info.channels + j] * float(level); + } + } + + free(tmpFrames); + + m_clipLength = info.frames; + m_clipF0 = f0; + m_clipRate = info.samplerate; + + return true; +} + +void +ClipMixer::reset() +{ + m_playing.clear(); +} + +double +ClipMixer::getResampleRatioFor(double frequency) +{ + if (!m_clipData || !m_clipRate) return 1.0; + double pitchRatio = m_clipF0 / frequency; + double resampleRatio = m_sampleRate / m_clipRate; + return pitchRatio * resampleRatio; +} + +sv_frame_t +ClipMixer::getResampledClipDuration(double frequency) +{ + return sv_frame_t(ceil(double(m_clipLength) * getResampleRatioFor(frequency))); +} + +void +ClipMixer::mix(float **toBuffers, + float gain, + std::vector<NoteStart> newNotes, + std::vector<NoteEnd> endingNotes) +{ + foreach (NoteStart note, newNotes) { + if (note.frequency > 20 && + note.frequency < 5000) { + m_playing.push_back(note); + } + } + + std::vector<NoteStart> remaining; + + float *levels = new float[m_channels]; + +#ifdef DEBUG_CLIP_MIXER + cerr << "ClipMixer::mix: have " << m_playing.size() << " playing note(s)" + << " and " << endingNotes.size() << " note(s) ending here" + << endl; +#endif + + 
foreach (NoteStart note, m_playing) { + + for (int c = 0; c < m_channels; ++c) { + levels[c] = note.level * gain; + } + if (note.pan != 0.0 && m_channels == 2) { + levels[0] *= 1.0f - note.pan; + levels[1] *= note.pan + 1.0f; + } + + sv_frame_t start = note.frameOffset; + sv_frame_t durationHere = m_blockSize; + if (start > 0) durationHere = m_blockSize - start; + + bool ending = false; + + foreach (NoteEnd end, endingNotes) { + if (end.frequency == note.frequency && + end.frameOffset >= start && + end.frameOffset <= m_blockSize) { + ending = true; + durationHere = end.frameOffset; + if (start > 0) durationHere = end.frameOffset - start; + break; + } + } + + sv_frame_t clipDuration = getResampledClipDuration(note.frequency); + if (start + clipDuration > 0) { + if (start < 0 && start + clipDuration < durationHere) { + durationHere = start + clipDuration; + } + if (durationHere > 0) { + mixNote(toBuffers, + levels, + note.frequency, + start < 0 ? -start : 0, + start > 0 ? start : 0, + durationHere, + ending); + } + } + + if (!ending) { + NoteStart adjusted = note; + adjusted.frameOffset -= m_blockSize; + remaining.push_back(adjusted); + } + } + + delete[] levels; + + m_playing = remaining; +} + +void +ClipMixer::mixNote(float **toBuffers, + float *levels, + float frequency, + sv_frame_t sourceOffset, + sv_frame_t targetOffset, + sv_frame_t sampleCount, + bool isEnd) +{ + if (!m_clipData) return; + + double ratio = getResampleRatioFor(frequency); + + double releaseTime = 0.01; + sv_frame_t releaseSampleCount = sv_frame_t(round(releaseTime * m_sampleRate)); + if (releaseSampleCount > sampleCount) { + releaseSampleCount = sampleCount; + } + double releaseFraction = 1.0/double(releaseSampleCount); + + for (sv_frame_t i = 0; i < sampleCount; ++i) { + + sv_frame_t s = sourceOffset + i; + + double os = double(s) / ratio; + sv_frame_t osi = sv_frame_t(floor(os)); + + //!!! just linear interpolation for now (same as SV's sample + //!!! player). a small sinc kernel would be better and + //!!! probably "good enough" + double value = 0.0; + if (osi < m_clipLength) { + value += m_clipData[osi]; + } + if (osi + 1 < m_clipLength) { + value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - double(osi)); + } + + if (isEnd && i + releaseSampleCount > sampleCount) { + value *= releaseFraction * double(sampleCount - i); // linear ramp for release + } + + for (int c = 0; c < m_channels; ++c) { + toBuffers[c][targetOffset + i] += float(levels[c] * value); + } + } +} + +
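The pitch shifting in mixNote() above is plain variable-rate playback with linear interpolation: the read position advances through the clip at a ratio that combines the pitch change (clip F0 over requested frequency) with the sample-rate change (output rate over clip rate). A standalone restatement with example numbers; the values are illustrative, not taken from the changeset:

    // Restatement of ClipMixer::getResampleRatioFor() with example numbers
    // (a sketch; the function and values below are illustrative).
    double resampleRatioFor(double clipF0, double clipRate,
                            double outputRate, double frequency)
    {
        double pitchRatio = clipF0 / frequency;     // < 1 when pitching up
        double rateRatio  = outputRate / clipRate;  // sample-rate conversion factor
        return pitchRatio * rateRatio;
    }

    // e.g. a clip recorded at middle C (261.63 Hz) and 44100 Hz, played as A4
    // (440 Hz) through a 48000 Hz device: 0.5946 * 1.0884 ~= 0.647, so each
    // output frame advances about 1/0.647 ~= 1.55 frames through the clip,
    // raising the pitch.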
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/ClipMixer.h Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,94 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam, 2006-2014 QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#ifndef CLIP_MIXER_H +#define CLIP_MIXER_H + +#include <QString> +#include <vector> + +#include "base/BaseTypes.h" + +/** + * Mix in synthetic notes produced by resampling a prerecorded + * clip. (i.e. this is an implementation of a digital sampler in the + * musician's sense.) This can mix any number of notes of arbitrary + * frequency, so long as they all use the same sample clip. + */ + +class ClipMixer +{ +public: + ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize); + ~ClipMixer(); + + void setChannelCount(int channels); + + /** + * Load a sample clip from a wav file. This can only happen once: + * construct a new ClipMixer if you want a different clip. The + * clip was recorded at a pitch with fundamental frequency clipF0, + * and should be scaled by level (in the range 0-1) when playing + * back. + */ + bool loadClipData(QString clipFilePath, double clipF0, double level); + + void reset(); // discarding any playing notes + + struct NoteStart { + sv_frame_t frameOffset; // within current processing block + float frequency; // Hz + float level; // volume in range (0,1] + float pan; // range [-1,1] + }; + + struct NoteEnd { + sv_frame_t frameOffset; // in current processing block + float frequency; // matching note start + }; + + void mix(float **toBuffers, + float gain, + std::vector<NoteStart> newNotes, + std::vector<NoteEnd> endingNotes); + +private: + int m_channels; + sv_samplerate_t m_sampleRate; + sv_frame_t m_blockSize; + + QString m_clipPath; + + float *m_clipData; + sv_frame_t m_clipLength; + double m_clipF0; + sv_samplerate_t m_clipRate; + + std::vector<NoteStart> m_playing; + + double getResampleRatioFor(double frequency); + sv_frame_t getResampledClipDuration(double frequency); + + void mixNote(float **toBuffers, + float *levels, + float frequency, + sv_frame_t sourceOffset, // within resampled note + sv_frame_t targetOffset, // within target buffer + sv_frame_t sampleCount, + bool isEnd); +}; + + +#endif
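ClipMixer is block-oriented: the clip is loaded once, and for each processing block the caller passes the note starts and ends that fall within that block, with frame offsets relative to the block start; notes that outlast the block are carried over internally. A minimal single-block sketch (clip path and note values are placeholders; assumes <vector> is included):

    // Sketch of one ClipMixer processing block (not part of the changeset).
    void mixOneBlock()
    {
        ClipMixer mixer(2, 44100, 1024);             // channels, sample rate, block size
        if (!mixer.loadClipData("/path/to/clip.wav", 261.63, 1.0)) {
            return;                                  // clip failed to load
        }

        std::vector<float> left(1024, 0.f), right(1024, 0.f);
        float *blockBuffers[2] = { left.data(), right.data() };

        ClipMixer::NoteStart start;
        start.frameOffset = 256;        // note begins 256 frames into this block
        start.frequency = 440.f;
        start.level = 0.8f;
        start.pan = 0.f;

        std::vector<ClipMixer::NoteStart> starts { start };
        std::vector<ClipMixer::NoteEnd> ends;        // nothing ending in this block

        mixer.mix(blockBuffers, 1.f, starts, ends);  // adds into blockBuffers
    }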
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/ContinuousSynth.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,149 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "ContinuousSynth.h" + +#include "base/Debug.h" +#include "system/System.h" + +#include <cmath> + +ContinuousSynth::ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType) : + m_channels(channels), + m_sampleRate(sampleRate), + m_blockSize(blockSize), + m_prevF0(-1.0), + m_phase(0.0), + m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square +{ +} + +ContinuousSynth::~ContinuousSynth() +{ +} + +void +ContinuousSynth::reset() +{ + m_phase = 0; +} + +void +ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0f) +{ + double f0(f0f); + if (f0 == 0.0) f0 = m_prevF0; + + bool wasOn = (m_prevF0 > 0.0); + bool nowOn = (f0 > 0.0); + + if (!nowOn && !wasOn) { + m_phase = 0; + return; + } + + sv_frame_t fadeLength = 100; + + float *levels = new float[m_channels]; + + for (int c = 0; c < m_channels; ++c) { + levels[c] = gain * 0.5f; // scale gain otherwise too loud compared to source + } + if (pan != 0.0 && m_channels == 2) { + levels[0] *= 1.0f - pan; + levels[1] *= pan + 1.0f; + } + +// cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl; + + for (sv_frame_t i = 0; i < m_blockSize; ++i) { + + double fHere = (nowOn ? f0 : m_prevF0); + + if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) { + // interpolate the frequency shift + fHere = m_prevF0 + ((f0 - m_prevF0) * double(i)) / double(fadeLength); + } + + double phasor = (fHere * 2 * M_PI) / m_sampleRate; + + m_phase = m_phase + phasor; + + int harmonics = int((m_sampleRate / 4) / fHere - 1); + if (harmonics < 1) harmonics = 1; + + switch (m_wavetype) { + case 1: + harmonics = 1; + break; + case 2: + break; + case 3: + break; + default: + harmonics = 3; + break; + } + + for (int h = 0; h < harmonics; ++h) { + + double v = 0; + double hn = 0; + double hp = 0; + + switch (m_wavetype) { + case 1: // single sinusoid + v = sin(m_phase); + break; + case 2: // sawtooth + if (h != 0) { + hn = h + 1; + hp = m_phase * hn; + v = -(1.0 / M_PI) * sin(hp) / hn; + } else { + v = 0.5; + } + break; + case 3: // square + hn = h*2 + 1; + hp = m_phase * hn; + v = sin(hp) / hn; + break; + default: // 3 sinusoids + hn = h + 1; + hp = m_phase * hn; + v = sin(hp) / hn; + break; + } + + if (!wasOn && i < fadeLength) { + // fade in + v = v * (double(i) / double(fadeLength)); + } else if (!nowOn) { + // fade out + if (i > fadeLength) v = 0; + else v = v * (1.0 - (double(i) / double(fadeLength))); + } + + for (int c = 0; c < m_channels; ++c) { + toBuffers[c][i] += float(levels[c] * v); + } + } + } + + m_prevF0 = f0; + + delete[] levels; +} +
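The sawtooth and square voices above are generated additively, with the partial count recomputed per sample from the instantaneous frequency as (sampleRate / 4) / f0 - 1; the sinusoidal voices override it with a fixed 1 or 3. A quick worked example of that count, using illustrative values:

    // Worked example of the harmonic-count rule in ContinuousSynth::mix()
    // (values are illustrative only).
    double sampleRate = 44100.0;
    double f0 = 220.0;
    int harmonics = int((sampleRate / 4) / f0 - 1);  // (11025 / 220) - 1 ~= 49.1 -> 49
    // In the sawtooth case the partial index is h + 1, so the series here runs
    // up to the 49th partial of 220 Hz, roughly 10.8 kHz.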
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/ContinuousSynth.h Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,65 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#ifndef CONTINUOUS_SYNTH_H +#define CONTINUOUS_SYNTH_H + +#include "base/BaseTypes.h" + +/** + * Mix into a target buffer a signal synthesised so as to sound at a + * specific frequency. The frequency may change with each processing + * block, or may be switched on or off. + */ + +class ContinuousSynth +{ +public: + ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType); + ~ContinuousSynth(); + + void setChannelCount(int channels); + + void reset(); + + /** + * Mix in a signal to be heard at the given fundamental + * frequency. Any oscillator state will be maintained between + * process calls so as to provide a continuous sound. The f0 value + * may vary between calls. + * + * Supply f0 equal to 0 if you want to maintain the f0 from the + * previous block (without having to remember what it was). + * + * Supply f0 less than 0 for silence. You should continue to call + * this even when the signal is silent if you want to ensure the + * sound switches on and off cleanly. + */ + void mix(float **toBuffers, + float gain, + float pan, + float f0); + +private: + int m_channels; + sv_samplerate_t m_sampleRate; + sv_frame_t m_blockSize; + + double m_prevF0; + double m_phase; + + int m_wavetype; +}; + +#endif
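The f0 conventions documented above are worth restating in code: pass the frequency to sound, 0 to hold the previous frequency, and a negative value for silence, and keep calling mix() even while silent so the fades in and out stay clean. A minimal sketch; the buffer setup and values are illustrative, and it assumes <vector> is included:

    // Sketch of driving ContinuousSynth across successive blocks.
    void synthDemo()
    {
        ContinuousSynth synth(2, 44100, 1024, 0);   // 0 selects the default wave type

        std::vector<float> l(1024, 0.f), r(1024, 0.f);
        float *buffers[2] = { l.data(), r.data() };
        float gain = 1.f, pan = 0.f;

        synth.mix(buffers, gain, pan, 440.f);   // sound at 440 Hz
        synth.mix(buffers, gain, pan, 0.f);     // 0: hold the previous frequency
        synth.mix(buffers, gain, pan, -1.f);    // negative: fade out to silence
        synth.mix(buffers, gain, pan, -1.f);    // keep calling while silent for clean gating
    }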
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/audio/PlaySpeedRangeMapper.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,101 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "PlaySpeedRangeMapper.h" + +#include <iostream> +#include <cmath> + +// PlaySpeedRangeMapper maps a position in the range [0,120] on to a +// play speed factor on a logarithmic scale in the range 0.125 -> +// 8. This ensures that the desirable speed factors 0.25, 0.5, 1, 2, +// and 4 are all mapped to exact positions (respectively 20, 40, 60, +// 80, 100). + +// Note that the "factor" referred to below is a play speed factor +// (higher = faster, 1.0 = normal speed), the "value" is a percentage +// (higher = faster, 100 = normal speed), and the "position" is an +// integer step on the dial's scale (0-120, 60 = centre). + +PlaySpeedRangeMapper::PlaySpeedRangeMapper() : + m_minpos(0), + m_maxpos(120) +{ +} + +int +PlaySpeedRangeMapper::getPositionForValue(double value) const +{ + // value is percent + double factor = getFactorForValue(value); + int position = getPositionForFactor(factor); + return position; +} + +int +PlaySpeedRangeMapper::getPositionForValueUnclamped(double value) const +{ + // We don't really provide this + return getPositionForValue(value); +} + +double +PlaySpeedRangeMapper::getValueForPosition(int position) const +{ + double factor = getFactorForPosition(position); + double pc = getValueForFactor(factor); + return pc; +} + +double +PlaySpeedRangeMapper::getValueForPositionUnclamped(int position) const +{ + // We don't really provide this + return getValueForPosition(position); +} + +double +PlaySpeedRangeMapper::getValueForFactor(double factor) const +{ + return factor * 100.0; +} + +double +PlaySpeedRangeMapper::getFactorForValue(double value) const +{ + return value / 100.0; +} + +int +PlaySpeedRangeMapper::getPositionForFactor(double factor) const +{ + if (factor == 0) return m_minpos; + int pos = int(lrint((log2(factor) + 3.0) * 20.0)); + if (pos < m_minpos) pos = m_minpos; + if (pos > m_maxpos) pos = m_maxpos; + return pos; +} + +double +PlaySpeedRangeMapper::getFactorForPosition(int position) const +{ + return pow(2.0, double(position) * 0.05 - 3.0); +} + +QString +PlaySpeedRangeMapper::getUnit() const +{ + return "%"; +}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/PlaySpeedRangeMapper.h	Fri Jan 13 10:29:55 2017 +0000
@@ -0,0 +1,49 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 QMUL.
+
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version. See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _PLAY_SPEED_RANGE_MAPPER_H_
+#define _PLAY_SPEED_RANGE_MAPPER_H_
+
+#include "base/RangeMapper.h"
+
+class PlaySpeedRangeMapper : public RangeMapper
+{
+public:
+    PlaySpeedRangeMapper();
+
+    int getMinPosition() const { return m_minpos; }
+    int getMaxPosition() const { return m_maxpos; }
+
+    virtual int getPositionForValue(double value) const;
+    virtual int getPositionForValueUnclamped(double value) const;
+
+    virtual double getValueForPosition(int position) const;
+    virtual double getValueForPositionUnclamped(int position) const;
+
+    int getPositionForFactor(double factor) const;
+    double getValueForFactor(double factor) const;
+
+    double getFactorForPosition(int position) const;
+    double getFactorForValue(double value) const;
+
+    virtual QString getUnit() const;
+
+protected:
+    int m_minpos;
+    int m_maxpos;
+};
+
+
+#endif
--- a/audioio/AudioCallbackPlaySource.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1897 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam and QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "AudioCallbackPlaySource.h" - -#include "AudioGenerator.h" - -#include "data/model/Model.h" -#include "base/ViewManagerBase.h" -#include "base/PlayParameterRepository.h" -#include "base/Preferences.h" -#include "data/model/DenseTimeValueModel.h" -#include "data/model/WaveFileModel.h" -#include "data/model/SparseOneDimensionalModel.h" -#include "plugin/RealTimePluginInstance.h" - -#include "AudioCallbackPlayTarget.h" - -#include <rubberband/RubberBandStretcher.h> -using namespace RubberBand; - -#include <iostream> -#include <cassert> - -//#define DEBUG_AUDIO_PLAY_SOURCE 1 -//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1 - -static const int DEFAULT_RING_BUFFER_SIZE = 131071; - -AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager, - QString clientName) : - m_viewManager(manager), - m_audioGenerator(new AudioGenerator()), - m_clientName(clientName), - m_readBuffers(0), - m_writeBuffers(0), - m_readBufferFill(0), - m_writeBufferFill(0), - m_bufferScavenger(1), - m_sourceChannelCount(0), - m_blockSize(1024), - m_sourceSampleRate(0), - m_targetSampleRate(0), - m_playLatency(0), - m_target(0), - m_lastRetrievalTimestamp(0.0), - m_lastRetrievedBlockSize(0), - m_trustworthyTimestamps(true), - m_lastCurrentFrame(0), - m_playing(false), - m_exiting(false), - m_lastModelEndFrame(0), - m_ringBufferSize(DEFAULT_RING_BUFFER_SIZE), - m_outputLeft(0.0), - m_outputRight(0.0), - m_auditioningPlugin(0), - m_auditioningPluginBypassed(false), - m_playStartFrame(0), - m_playStartFramePassed(false), - m_timeStretcher(0), - m_monoStretcher(0), - m_stretchRatio(1.0), - m_stretchMono(false), - m_stretcherInputCount(0), - m_stretcherInputs(0), - m_stretcherInputSizes(0), - m_fillThread(0), - m_converter(0), - m_crapConverter(0), - m_resampleQuality(Preferences::getInstance()->getResampleQuality()) -{ - m_viewManager->setAudioPlaySource(this); - - connect(m_viewManager, SIGNAL(selectionChanged()), - this, SLOT(selectionChanged())); - connect(m_viewManager, SIGNAL(playLoopModeChanged()), - this, SLOT(playLoopModeChanged())); - connect(m_viewManager, SIGNAL(playSelectionModeChanged()), - this, SLOT(playSelectionModeChanged())); - - connect(this, SIGNAL(playStatusChanged(bool)), - m_viewManager, SLOT(playStatusChanged(bool))); - - connect(PlayParameterRepository::getInstance(), - SIGNAL(playParametersChanged(PlayParameters *)), - this, SLOT(playParametersChanged(PlayParameters *))); - - connect(Preferences::getInstance(), - SIGNAL(propertyChanged(PropertyContainer::PropertyName)), - this, SLOT(preferenceChanged(PropertyContainer::PropertyName))); -} - -AudioCallbackPlaySource::~AudioCallbackPlaySource() -{ -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource entering" << endl; -#endif - m_exiting = true; - - if (m_fillThread) { 
-#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource dtor: awakening thread" << endl; -#endif - m_condition.wakeAll(); - m_fillThread->wait(); - delete m_fillThread; - } - - clearModels(); - - if (m_readBuffers != m_writeBuffers) { - delete m_readBuffers; - } - - delete m_writeBuffers; - - delete m_audioGenerator; - - for (int i = 0; i < m_stretcherInputCount; ++i) { - delete[] m_stretcherInputs[i]; - } - delete[] m_stretcherInputSizes; - delete[] m_stretcherInputs; - - delete m_timeStretcher; - delete m_monoStretcher; - - m_bufferScavenger.scavenge(true); - m_pluginScavenger.scavenge(true); -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource finishing" << endl; -#endif -} - -void -AudioCallbackPlaySource::addModel(Model *model) -{ - if (m_models.find(model) != m_models.end()) return; - - bool willPlay = m_audioGenerator->addModel(model); - - m_mutex.lock(); - - m_models.insert(model); - if (model->getEndFrame() > m_lastModelEndFrame) { - m_lastModelEndFrame = model->getEndFrame(); - } - - bool buffersChanged = false, srChanged = false; - - int modelChannels = 1; - DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model); - if (dtvm) modelChannels = dtvm->getChannelCount(); - if (modelChannels > m_sourceChannelCount) { - m_sourceChannelCount = modelChannels; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl; -#endif - - if (m_sourceSampleRate == 0) { - - m_sourceSampleRate = model->getSampleRate(); - srChanged = true; - - } else if (model->getSampleRate() != m_sourceSampleRate) { - - // If this is a dense time-value model and we have no other, we - // can just switch to this model's sample rate - - if (dtvm) { - - bool conflicting = false; - - for (std::set<Model *>::const_iterator i = m_models.begin(); - i != m_models.end(); ++i) { - // Only wave file models can be considered conflicting -- - // writable wave file models are derived and we shouldn't - // take their rates into account. 
Also, don't give any - // particular weight to a file that's already playing at - // the wrong rate anyway - WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i); - if (wfm && wfm != dtvm && - wfm->getSampleRate() != model->getSampleRate() && - wfm->getSampleRate() == m_sourceSampleRate) { - SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl; - conflicting = true; - break; - } - } - - if (conflicting) { - - SVDEBUG << "AudioCallbackPlaySource::addModel: ERROR: " - << "New model sample rate does not match" << endl - << "existing model(s) (new " << model->getSampleRate() - << " vs " << m_sourceSampleRate - << "), playback will be wrong" - << endl; - - emit sampleRateMismatch(model->getSampleRate(), - m_sourceSampleRate, - false); - } else { - m_sourceSampleRate = model->getSampleRate(); - srChanged = true; - } - } - } - - if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) { - clearRingBuffers(true, getTargetChannelCount()); - buffersChanged = true; - } else { - if (willPlay) clearRingBuffers(true); - } - - if (buffersChanged || srChanged) { - if (m_converter) { - src_delete(m_converter); - src_delete(m_crapConverter); - m_converter = 0; - m_crapConverter = 0; - } - } - - rebuildRangeLists(); - - m_mutex.unlock(); - - m_audioGenerator->setTargetChannelCount(getTargetChannelCount()); - - if (!m_fillThread) { - m_fillThread = new FillThread(*this); - m_fillThread->start(); - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << endl; -#endif - - if (buffersChanged || srChanged) { - emit modelReplaced(); - } - - connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), - this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl; -#endif - - m_condition.wakeAll(); -} - -void -AudioCallbackPlaySource::modelChangedWithin(sv_frame_t -#ifdef DEBUG_AUDIO_PLAY_SOURCE - startFrame -#endif - , sv_frame_t endFrame) -{ -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::modelChangedWithin(" << startFrame << "," << endFrame << ")" << endl; -#endif - if (endFrame > m_lastModelEndFrame) { - m_lastModelEndFrame = endFrame; - rebuildRangeLists(); - } -} - -void -AudioCallbackPlaySource::removeModel(Model *model) -{ - m_mutex.lock(); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl; -#endif - - disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), - this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); - - m_models.erase(model); - - if (m_models.empty()) { - if (m_converter) { - src_delete(m_converter); - src_delete(m_crapConverter); - m_converter = 0; - m_crapConverter = 0; - } - m_sourceSampleRate = 0; - } - - sv_frame_t lastEnd = 0; - for (std::set<Model *>::const_iterator i = m_models.begin(); - i != m_models.end(); ++i) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl; -#endif - if ((*i)->getEndFrame() > lastEnd) { - lastEnd = (*i)->getEndFrame(); - } -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "(done, lastEnd now " << lastEnd << ")" << endl; -#endif - } - m_lastModelEndFrame = lastEnd; - - m_audioGenerator->removeModel(model); - - m_mutex.unlock(); - - clearRingBuffers(); -} - -void -AudioCallbackPlaySource::clearModels() -{ - 
m_mutex.lock(); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::clearModels()" << endl; -#endif - - m_models.clear(); - - if (m_converter) { - src_delete(m_converter); - src_delete(m_crapConverter); - m_converter = 0; - m_crapConverter = 0; - } - - m_lastModelEndFrame = 0; - - m_sourceSampleRate = 0; - - m_mutex.unlock(); - - m_audioGenerator->clearModels(); - - clearRingBuffers(); -} - -void -AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count) -{ - if (!haveLock) m_mutex.lock(); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "clearRingBuffers" << endl; -#endif - - rebuildRangeLists(); - - if (count == 0) { - if (m_writeBuffers) count = int(m_writeBuffers->size()); - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "current playing frame = " << getCurrentPlayingFrame() << endl; - - cerr << "write buffer fill (before) = " << m_writeBufferFill << endl; -#endif - - m_writeBufferFill = getCurrentBufferedFrame(); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "current buffered frame = " << m_writeBufferFill << endl; -#endif - - if (m_readBuffers != m_writeBuffers) { - delete m_writeBuffers; - } - - m_writeBuffers = new RingBufferVector; - - for (int i = 0; i < count; ++i) { - m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize)); - } - - m_audioGenerator->reset(); - -// cout << "AudioCallbackPlaySource::clearRingBuffers: Created " -// << count << " write buffers" << endl; - - if (!haveLock) { - m_mutex.unlock(); - } -} - -void -AudioCallbackPlaySource::play(sv_frame_t startFrame) -{ - if (!m_sourceSampleRate) { - cerr << "AudioCallbackPlaySource::play: No source sample rate available, not playing" << endl; - return; - } - - if (m_viewManager->getPlaySelectionMode() && - !m_viewManager->getSelections().empty()) { - - SVDEBUG << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = "; - - startFrame = m_viewManager->constrainFrameToSelection(startFrame); - - SVDEBUG << startFrame << endl; - - } else { - if (startFrame < 0) { - startFrame = 0; - } - if (startFrame >= m_lastModelEndFrame) { - startFrame = 0; - } - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "play(" << startFrame << ") -> playback model "; -#endif - - startFrame = m_viewManager->alignReferenceToPlaybackFrame(startFrame); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << startFrame << endl; -#endif - - // The fill thread will automatically empty its buffers before - // starting again if we have not so far been playing, but not if - // we're just re-seeking. 
- // NO -- we can end up playing some first -- always reset here - - m_mutex.lock(); - - if (m_timeStretcher) { - m_timeStretcher->reset(); - } - if (m_monoStretcher) { - m_monoStretcher->reset(); - } - - m_readBufferFill = m_writeBufferFill = startFrame; - if (m_readBuffers) { - for (int c = 0; c < getTargetChannelCount(); ++c) { - RingBuffer<float> *rb = getReadRingBuffer(c); -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "reset ring buffer for channel " << c << endl; -#endif - if (rb) rb->reset(); - } - } - if (m_converter) src_reset(m_converter); - if (m_crapConverter) src_reset(m_crapConverter); - - m_mutex.unlock(); - - m_audioGenerator->reset(); - - m_playStartFrame = startFrame; - m_playStartFramePassed = false; - m_playStartedAt = RealTime::zeroTime; - if (m_target) { - m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime()); - } - - bool changed = !m_playing; - m_lastRetrievalTimestamp = 0; - m_lastCurrentFrame = 0; - m_playing = true; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::play: awakening thread" << endl; -#endif - - m_condition.wakeAll(); - if (changed) { - emit playStatusChanged(m_playing); - emit activity(tr("Play from %1").arg - (RealTime::frame2RealTime - (m_playStartFrame, m_sourceSampleRate).toText().c_str())); - } -} - -void -AudioCallbackPlaySource::stop() -{ -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::stop()" << endl; -#endif - bool changed = m_playing; - m_playing = false; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::stop: awakening thread" << endl; -#endif - - m_condition.wakeAll(); - m_lastRetrievalTimestamp = 0; - if (changed) { - emit playStatusChanged(m_playing); - emit activity(tr("Stop at %1").arg - (RealTime::frame2RealTime - (m_lastCurrentFrame, m_sourceSampleRate).toText().c_str())); - } - m_lastCurrentFrame = 0; -} - -void -AudioCallbackPlaySource::selectionChanged() -{ - if (m_viewManager->getPlaySelectionMode()) { - clearRingBuffers(); - } -} - -void -AudioCallbackPlaySource::playLoopModeChanged() -{ - clearRingBuffers(); -} - -void -AudioCallbackPlaySource::playSelectionModeChanged() -{ - if (!m_viewManager->getSelections().empty()) { - clearRingBuffers(); - } -} - -void -AudioCallbackPlaySource::playParametersChanged(PlayParameters *) -{ - clearRingBuffers(); -} - -void -AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n) -{ - if (n == "Resample Quality") { - setResampleQuality(Preferences::getInstance()->getResampleQuality()); - } -} - -void -AudioCallbackPlaySource::audioProcessingOverload() -{ - cerr << "Audio processing overload!" 
<< endl; - - if (!m_playing) return; - - RealTimePluginInstance *ap = m_auditioningPlugin; - if (ap && !m_auditioningPluginBypassed) { - m_auditioningPluginBypassed = true; - emit audioOverloadPluginDisabled(); - return; - } - - if (m_timeStretcher && - m_timeStretcher->getTimeRatio() < 1.0 && - m_stretcherInputCount > 1 && - m_monoStretcher && !m_stretchMono) { - m_stretchMono = true; - emit audioTimeStretchMultiChannelDisabled(); - return; - } -} - -void -AudioCallbackPlaySource::setTarget(AudioCallbackPlayTarget *target, int size) -{ - m_target = target; - cout << "AudioCallbackPlaySource::setTarget: Block size -> " << size << endl; - if (size != 0) { - m_blockSize = size; - } - if (size * 4 > m_ringBufferSize) { - SVDEBUG << "AudioCallbackPlaySource::setTarget: Buffer size " - << size << " > a quarter of ring buffer size " - << m_ringBufferSize << ", calling for more ring buffer" - << endl; - m_ringBufferSize = size * 4; - if (m_writeBuffers && !m_writeBuffers->empty()) { - clearRingBuffers(); - } - } -} - -int -AudioCallbackPlaySource::getTargetBlockSize() const -{ -// cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl; - return int(m_blockSize); -} - -void -AudioCallbackPlaySource::setTargetPlayLatency(sv_frame_t latency) -{ - m_playLatency = latency; -} - -sv_frame_t -AudioCallbackPlaySource::getTargetPlayLatency() const -{ - return m_playLatency; -} - -sv_frame_t -AudioCallbackPlaySource::getCurrentPlayingFrame() -{ - // This method attempts to estimate which audio sample frame is - // "currently coming through the speakers". - - sv_samplerate_t targetRate = getTargetSampleRate(); - sv_frame_t latency = m_playLatency; // at target rate - RealTime latency_t = RealTime::zeroTime; - - if (targetRate != 0) { - latency_t = RealTime::frame2RealTime(latency, targetRate); - } - - return getCurrentFrame(latency_t); -} - -sv_frame_t -AudioCallbackPlaySource::getCurrentBufferedFrame() -{ - return getCurrentFrame(RealTime::zeroTime); -} - -sv_frame_t -AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t) -{ - // We resample when filling the ring buffer, and time-stretch when - // draining it. The buffer contains data at the "target rate" and - // the latency provided by the target is also at the target rate. - // Because of the multiple rates involved, we do the actual - // calculation using RealTime instead. 
- - sv_samplerate_t sourceRate = getSourceSampleRate(); - sv_samplerate_t targetRate = getTargetSampleRate(); - - if (sourceRate == 0 || targetRate == 0) return 0; - - int inbuffer = 0; // at target rate - - for (int c = 0; c < getTargetChannelCount(); ++c) { - RingBuffer<float> *rb = getReadRingBuffer(c); - if (rb) { - int here = rb->getReadSpace(); - if (c == 0 || here < inbuffer) inbuffer = here; - } - } - - sv_frame_t readBufferFill = m_readBufferFill; - sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize; - double lastRetrievalTimestamp = m_lastRetrievalTimestamp; - double currentTime = 0.0; - if (m_target) currentTime = m_target->getCurrentTime(); - - bool looping = m_viewManager->getPlayLoopMode(); - - RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate); - - sv_frame_t stretchlat = 0; - double timeRatio = 1.0; - - if (m_timeStretcher) { - stretchlat = m_timeStretcher->getLatency(); - timeRatio = m_timeStretcher->getTimeRatio(); - } - - RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, targetRate); - - // When the target has just requested a block from us, the last - // sample it obtained was our buffer fill frame count minus the - // amount of read space (converted back to source sample rate) - // remaining now. That sample is not expected to be played until - // the target's play latency has elapsed. By the time the - // following block is requested, that sample will be at the - // target's play latency minus the last requested block size away - // from being played. - - RealTime sincerequest_t = RealTime::zeroTime; - RealTime lastretrieved_t = RealTime::zeroTime; - - if (m_target && - m_trustworthyTimestamps && - lastRetrievalTimestamp != 0.0) { - - lastretrieved_t = RealTime::frame2RealTime - (lastRetrievedBlockSize, targetRate); - - // calculate number of frames at target rate that have elapsed - // since the end of the last call to getSourceSamples - - if (m_trustworthyTimestamps && !looping) { - - // this adjustment seems to cause more problems when looping - double elapsed = currentTime - lastRetrievalTimestamp; - - if (elapsed > 0.0) { - sincerequest_t = RealTime::fromSeconds(elapsed); - } - } - - } else { - - lastretrieved_t = RealTime::frame2RealTime - (getTargetBlockSize(), targetRate); - } - - RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, sourceRate); - - if (timeRatio != 1.0) { - lastretrieved_t = lastretrieved_t / timeRatio; - sincerequest_t = sincerequest_t / timeRatio; - latency_t = latency_t / timeRatio; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl; -#endif - - // Normally the range lists should contain at least one item each - // -- if playback is unconstrained, that item should report the - // entire source audio duration. 
- - if (m_rangeStarts.empty()) { - rebuildRangeLists(); - } - - if (m_rangeStarts.empty()) { - // this code is only used in case of error in rebuildRangeLists - RealTime playing_t = bufferedto_t - - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t - + sincerequest_t; - if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; - sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate); - return m_viewManager->alignPlaybackFrameToReference(frame); - } - - int inRange = 0; - int index = 0; - - for (int i = 0; i < (int)m_rangeStarts.size(); ++i) { - if (bufferedto_t >= m_rangeStarts[i]) { - inRange = index; - } else { - break; - } - ++index; - } - - if (inRange >= int(m_rangeStarts.size())) { - inRange = int(m_rangeStarts.size())-1; - } - - RealTime playing_t = bufferedto_t; - - playing_t = playing_t - - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t - + sincerequest_t; - - // This rather gross little hack is used to ensure that latency - // compensation doesn't result in the playback pointer appearing - // to start earlier than the actual playback does. It doesn't - // work properly (hence the bail-out in the middle) because if we - // are playing a relatively short looped region, the playing time - // estimated from the buffer fill frame may have wrapped around - // the region boundary and end up being much smaller than the - // theoretical play start frame, perhaps even for the entire - // duration of playback! - - if (!m_playStartFramePassed) { - RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame, - sourceRate); - if (playing_t < playstart_t) { -// cerr << "playing_t " << playing_t << " < playstart_t " -// << playstart_t << endl; - if (/*!!! sincerequest_t > RealTime::zeroTime && */ - m_playStartedAt + latency_t + stretchlat_t < - RealTime::fromSeconds(currentTime)) { -// cerr << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << endl; - m_playStartFramePassed = true; - } else { - playing_t = playstart_t; - } - } else { - m_playStartFramePassed = true; - } - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << "playing_t " << playing_t; -#endif - - playing_t = playing_t - m_rangeStarts[inRange]; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << " as offset into range " << inRange << " (start =" << m_rangeStarts[inRange] << " duration =" << m_rangeDurations[inRange] << ") = " << playing_t << endl; -#endif - - while (playing_t < RealTime::zeroTime) { - - if (inRange == 0) { - if (looping) { - inRange = int(m_rangeStarts.size()) - 1; - } else { - break; - } - } else { - --inRange; - } - - playing_t = playing_t + m_rangeDurations[inRange]; - } - - playing_t = playing_t + m_rangeStarts[inRange]; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << " playing time: " << playing_t << endl; -#endif - - if (!looping) { - if (inRange == (int)m_rangeStarts.size()-1 && - playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) { -cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl; - stop(); - } - } - - if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; - - sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate); - - if (m_lastCurrentFrame > 0 && !looping) { - if (frame < m_lastCurrentFrame) { - frame = m_lastCurrentFrame; - } - } - - m_lastCurrentFrame = 
frame; - - return m_viewManager->alignPlaybackFrameToReference(frame); -} - -void -AudioCallbackPlaySource::rebuildRangeLists() -{ - bool constrained = (m_viewManager->getPlaySelectionMode()); - - m_rangeStarts.clear(); - m_rangeDurations.clear(); - - sv_samplerate_t sourceRate = getSourceSampleRate(); - if (sourceRate == 0) return; - - RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate); - if (end == RealTime::zeroTime) return; - - if (!constrained) { - m_rangeStarts.push_back(RealTime::zeroTime); - m_rangeDurations.push_back(end); - return; - } - - MultiSelection::SelectionList selections = m_viewManager->getSelections(); - MultiSelection::SelectionList::const_iterator i; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::rebuildRangeLists" << endl; -#endif - - if (!selections.empty()) { - - for (i = selections.begin(); i != selections.end(); ++i) { - - RealTime start = - (RealTime::frame2RealTime - (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), - sourceRate)); - RealTime duration = - (RealTime::frame2RealTime - (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) - - m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), - sourceRate)); - - m_rangeStarts.push_back(start); - m_rangeDurations.push_back(duration); - } - } else { - m_rangeStarts.push_back(RealTime::zeroTime); - m_rangeDurations.push_back(end); - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "Now have " << m_rangeStarts.size() << " play ranges" << endl; -#endif -} - -void -AudioCallbackPlaySource::setOutputLevels(float left, float right) -{ - m_outputLeft = left; - m_outputRight = right; -} - -bool -AudioCallbackPlaySource::getOutputLevels(float &left, float &right) -{ - left = m_outputLeft; - right = m_outputRight; - return true; -} - -void -AudioCallbackPlaySource::setTargetSampleRate(sv_samplerate_t sr) -{ - bool first = (m_targetSampleRate == 0); - - m_targetSampleRate = sr; - initialiseConverter(); - - if (first && (m_stretchRatio != 1.f)) { - // couldn't create a stretcher before because we had no sample - // rate: make one now - setTimeStretch(m_stretchRatio); - } -} - -void -AudioCallbackPlaySource::initialiseConverter() -{ - m_mutex.lock(); - - if (m_converter) { - src_delete(m_converter); - src_delete(m_crapConverter); - m_converter = 0; - m_crapConverter = 0; - } - - if (getSourceSampleRate() != getTargetSampleRate()) { - - int err = 0; - - m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY : - m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY : - m_resampleQuality == 0 ? 
SRC_SINC_FASTEST : - SRC_SINC_MEDIUM_QUALITY, - getTargetChannelCount(), &err); - - if (m_converter) { - m_crapConverter = src_new(SRC_LINEAR, - getTargetChannelCount(), - &err); - } - - if (!m_converter || !m_crapConverter) { - cerr - << "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: " - << src_strerror(err) << endl; - - if (m_converter) { - src_delete(m_converter); - m_converter = 0; - } - - if (m_crapConverter) { - src_delete(m_crapConverter); - m_crapConverter = 0; - } - - m_mutex.unlock(); - - emit sampleRateMismatch(getSourceSampleRate(), - getTargetSampleRate(), - false); - } else { - - m_mutex.unlock(); - - emit sampleRateMismatch(getSourceSampleRate(), - getTargetSampleRate(), - true); - } - } else { - m_mutex.unlock(); - } -} - -void -AudioCallbackPlaySource::setResampleQuality(int q) -{ - if (q == m_resampleQuality) return; - m_resampleQuality = q; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - SVDEBUG << "AudioCallbackPlaySource::setResampleQuality: setting to " - << m_resampleQuality << endl; -#endif - - initialiseConverter(); -} - -void -AudioCallbackPlaySource::setAuditioningEffect(Auditionable *a) -{ - RealTimePluginInstance *plugin = dynamic_cast<RealTimePluginInstance *>(a); - if (a && !plugin) { - cerr << "WARNING: AudioCallbackPlaySource::setAuditioningEffect: auditionable object " << a << " is not a real-time plugin instance" << endl; - } - - m_mutex.lock(); - m_auditioningPlugin = plugin; - m_auditioningPluginBypassed = false; - m_mutex.unlock(); -} - -void -AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s) -{ - m_audioGenerator->setSoloModelSet(s); - clearRingBuffers(); -} - -void -AudioCallbackPlaySource::clearSoloModelSet() -{ - m_audioGenerator->clearSoloModelSet(); - clearRingBuffers(); -} - -sv_samplerate_t -AudioCallbackPlaySource::getTargetSampleRate() const -{ - if (m_targetSampleRate) return m_targetSampleRate; - else return getSourceSampleRate(); -} - -int -AudioCallbackPlaySource::getSourceChannelCount() const -{ - return m_sourceChannelCount; -} - -int -AudioCallbackPlaySource::getTargetChannelCount() const -{ - if (m_sourceChannelCount < 2) return 2; - return m_sourceChannelCount; -} - -sv_samplerate_t -AudioCallbackPlaySource::getSourceSampleRate() const -{ - return m_sourceSampleRate; -} - -void -AudioCallbackPlaySource::setTimeStretch(double factor) -{ - m_stretchRatio = factor; - - if (!getTargetSampleRate()) return; // have to make our stretcher later - - if (m_timeStretcher || (factor == 1.0)) { - // stretch ratio will be set in next process call if appropriate - } else { - m_stretcherInputCount = getTargetChannelCount(); - RubberBandStretcher *stretcher = new RubberBandStretcher - (int(getTargetSampleRate()), - m_stretcherInputCount, - RubberBandStretcher::OptionProcessRealTime, - factor); - RubberBandStretcher *monoStretcher = new RubberBandStretcher - (int(getTargetSampleRate()), - 1, - RubberBandStretcher::OptionProcessRealTime, - factor); - m_stretcherInputs = new float *[m_stretcherInputCount]; - m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount]; - for (int c = 0; c < m_stretcherInputCount; ++c) { - m_stretcherInputSizes[c] = 16384; - m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; - } - m_monoStretcher = monoStretcher; - m_timeStretcher = stretcher; - } - - emit activity(tr("Change time-stretch factor to %1").arg(factor)); -} - -sv_frame_t -AudioCallbackPlaySource::getSourceSamples(sv_frame_t count, float **buffer) -{ - if (!m_playing) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - SVDEBUG 
<< "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl; -#endif - for (int ch = 0; ch < getTargetChannelCount(); ++ch) { - for (int i = 0; i < count; ++i) { - buffer[ch][i] = 0.0; - } - } - return 0; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Playing" << endl; -#endif - - // Ensure that all buffers have at least the amount of data we - // need -- else reduce the size of our requests correspondingly - - for (int ch = 0; ch < getTargetChannelCount(); ++ch) { - - RingBuffer<float> *rb = getReadRingBuffer(ch); - - if (!rb) { - cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " - << "No ring buffer available for channel " << ch - << ", returning no data here" << endl; - count = 0; - break; - } - - int rs = rb->getReadSpace(); - if (rs < count) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " - << "Ring buffer for channel " << ch << " has only " - << rs << " (of " << count << ") samples available (" - << "ring buffer size is " << rb->getSize() << ", write " - << "space " << rb->getWriteSpace() << "), " - << "reducing request size" << endl; -#endif - count = rs; - } - } - - if (count == 0) return 0; - - RubberBandStretcher *ts = m_timeStretcher; - RubberBandStretcher *ms = m_monoStretcher; - - double ratio = ts ? ts->getTimeRatio() : 1.0; - - if (ratio != m_stretchRatio) { - if (!ts) { - cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl; - m_stretchRatio = 1.0; - } else { - ts->setTimeRatio(m_stretchRatio); - if (ms) ms->setTimeRatio(m_stretchRatio); - if (m_stretchRatio >= 1.0) m_stretchMono = false; - } - } - - int stretchChannels = m_stretcherInputCount; - if (m_stretchMono) { - if (ms) { - ts = ms; - stretchChannels = 1; - } else { - m_stretchMono = false; - } - } - - if (m_target) { - m_lastRetrievedBlockSize = count; - m_lastRetrievalTimestamp = m_target->getCurrentTime(); - } - - if (!ts || ratio == 1.f) { - - int got = 0; - - for (int ch = 0; ch < getTargetChannelCount(); ++ch) { - - RingBuffer<float> *rb = getReadRingBuffer(ch); - - if (rb) { - - // this is marginally more likely to leave our channels in - // sync after a processing failure than just passing "count": - sv_frame_t request = count; - if (ch > 0) request = got; - - got = rb->read(buffer[ch], int(request)); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl; -#endif - } - - for (int ch = 0; ch < getTargetChannelCount(); ++ch) { - for (int i = got; i < count; ++i) { - buffer[ch][i] = 0.0; - } - } - } - - applyAuditioningEffect(count, buffer); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::getSamples: awakening thread" << endl; -#endif - - m_condition.wakeAll(); - - return got; - } - - int channels = getTargetChannelCount(); - sv_frame_t available; - sv_frame_t fedToStretcher = 0; - int warned = 0; - - // The input block for a given output is approx output / ratio, - // but we can't predict it exactly, for an adaptive timestretcher. 
- - while ((available = ts->available()) < count) { - - sv_frame_t reqd = lrint(double(count - available) / ratio); - reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired())); - if (reqd == 0) reqd = 1; - - sv_frame_t got = reqd; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl; -#endif - - for (int c = 0; c < channels; ++c) { - if (c >= m_stretcherInputCount) continue; - if (reqd > m_stretcherInputSizes[c]) { - if (c == 0) { - cerr << "WARNING: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << endl; - } - delete[] m_stretcherInputs[c]; - m_stretcherInputSizes[c] = reqd * 2; - m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; - } - } - - for (int c = 0; c < channels; ++c) { - if (c >= m_stretcherInputCount) continue; - RingBuffer<float> *rb = getReadRingBuffer(c); - if (rb) { - sv_frame_t gotHere; - if (stretchChannels == 1 && c > 0) { - gotHere = rb->readAdding(m_stretcherInputs[0], int(got)); - } else { - gotHere = rb->read(m_stretcherInputs[c], int(got)); - } - if (gotHere < got) got = gotHere; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - if (c == 0) { - SVDEBUG << "feeding stretcher: got " << gotHere - << ", " << rb->getReadSpace() << " remain" << endl; - } -#endif - - } else { - cerr << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << endl; - } - } - - if (got < reqd) { - cerr << "WARNING: Read underrun in playback (" - << got << " < " << reqd << ")" << endl; - } - - ts->process(m_stretcherInputs, size_t(got), false); - - fedToStretcher += got; - - if (got == 0) break; - - if (ts->available() == available) { - cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << endl; - if (++warned == 5) break; - } - } - - ts->retrieve(buffer, size_t(count)); - - for (int c = stretchChannels; c < getTargetChannelCount(); ++c) { - for (int i = 0; i < count; ++i) { - buffer[c][i] = buffer[0][i]; - } - } - - applyAuditioningEffect(count, buffer); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySource::getSamples [stretched]: awakening thread" << endl; -#endif - - m_condition.wakeAll(); - - return count; -} - -void -AudioCallbackPlaySource::applyAuditioningEffect(sv_frame_t count, float **buffers) -{ - if (m_auditioningPluginBypassed) return; - RealTimePluginInstance *plugin = m_auditioningPlugin; - if (!plugin) return; - - if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) { -// cerr << "plugin input count " << plugin->getAudioInputCount() -// << " != our channel count " << getTargetChannelCount() -// << endl; - return; - } - if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) { -// cerr << "plugin output count " << plugin->getAudioOutputCount() -// << " != our channel count " << getTargetChannelCount() -// << endl; - return; - } - if ((int)plugin->getBufferSize() < count) { -// cerr << "plugin buffer size " << plugin->getBufferSize() -// << " < our block size " << count -// << endl; - return; - } - - float **ib = plugin->getAudioInputBuffers(); - float **ob = plugin->getAudioOutputBuffers(); - - for (int c = 0; c < getTargetChannelCount(); ++c) { - for (int i = 0; i < count; ++i) { - ib[c][i] = buffers[c][i]; - } - } - - plugin->run(Vamp::RealTime::zeroTime, int(count)); - - for (int c = 0; c < getTargetChannelCount(); ++c) { - for (int i = 0; i < 
count; ++i) { - buffers[c][i] = ob[c][i]; - } - } -} - -// Called from fill thread, m_playing true, mutex held -bool -AudioCallbackPlaySource::fillBuffers() -{ - static float *tmp = 0; - static sv_frame_t tmpSize = 0; - - sv_frame_t space = 0; - for (int c = 0; c < getTargetChannelCount(); ++c) { - RingBuffer<float> *wb = getWriteRingBuffer(c); - if (wb) { - sv_frame_t spaceHere = wb->getWriteSpace(); - if (c == 0 || spaceHere < space) space = spaceHere; - } - } - - if (space == 0) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread: no space to fill" << endl; -#endif - return false; - } - - sv_frame_t f = m_writeBufferFill; - - bool readWriteEqual = (m_readBuffers == m_writeBuffers); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - if (!readWriteEqual) { - cout << "AudioCallbackPlaySourceFillThread: note read buffers != write buffers" << endl; - } - cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << endl; -#endif - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "buffered to " << f << " already" << endl; -#endif - - bool resample = (getSourceSampleRate() != getTargetSampleRate()); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << endl; -#endif - - int channels = getTargetChannelCount(); - - sv_frame_t orig = space; - sv_frame_t got = 0; - - static float **bufferPtrs = 0; - static int bufferPtrCount = 0; - - if (bufferPtrCount < channels) { - if (bufferPtrs) delete[] bufferPtrs; - bufferPtrs = new float *[channels]; - bufferPtrCount = channels; - } - - sv_frame_t generatorBlockSize = m_audioGenerator->getBlockSize(); - - if (resample && !m_converter) { - static bool warned = false; - if (!warned) { - cerr << "WARNING: sample rates differ, but no converter available!" << endl; - warned = true; - } - } - - if (resample && m_converter) { - - double ratio = - double(getTargetSampleRate()) / double(getSourceSampleRate()); - orig = sv_frame_t(double(orig) / ratio + 0.1); - - // orig must be a multiple of generatorBlockSize - orig = (orig / generatorBlockSize) * generatorBlockSize; - if (orig == 0) return false; - - sv_frame_t work = std::max(orig, space); - - // We only allocate one buffer, but we use it in two halves. - // We place the non-interleaved values in the second half of - // the buffer (orig samples for channel 0, orig samples for - // channel 1 etc), and then interleave them into the first - // half of the buffer. Then we resample back into the second - // half (interleaved) and de-interleave the results back to - // the start of the buffer for insertion into the ringbuffers. - // What a faff -- especially as we've already de-interleaved - // the audio data from the source file elsewhere before we - // even reach this point. 
- - if (tmpSize < channels * work * 2) { - delete[] tmp; - tmp = new float[channels * work * 2]; - tmpSize = channels * work * 2; - } - - float *nonintlv = tmp + channels * work; - float *intlv = tmp; - float *srcout = tmp + channels * work; - - for (int c = 0; c < channels; ++c) { - for (int i = 0; i < orig; ++i) { - nonintlv[channels * i + c] = 0.0f; - } - } - - for (int c = 0; c < channels; ++c) { - bufferPtrs[c] = nonintlv + c * orig; - } - - got = mixModels(f, orig, bufferPtrs); // also modifies f - - // and interleave into first half - for (int c = 0; c < channels; ++c) { - for (int i = 0; i < got; ++i) { - float sample = nonintlv[c * got + i]; - intlv[channels * i + c] = sample; - } - } - - SRC_DATA data; - data.data_in = intlv; - data.data_out = srcout; - data.input_frames = long(got); - data.output_frames = long(work); - data.src_ratio = ratio; - data.end_of_input = 0; - - int err = 0; - - if (m_timeStretcher && m_timeStretcher->getTimeRatio() < 0.4) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Using crappy converter" << endl; -#endif - err = src_process(m_crapConverter, &data); - } else { - err = src_process(m_converter, &data); - } - - sv_frame_t toCopy = sv_frame_t(double(got) * ratio + 0.1); - - if (err) { - cerr - << "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: " - << src_strerror(err) << endl; - //!!! Then what? - } else { - got = data.input_frames_used; - toCopy = data.output_frames_gen; -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Resampled " << got << " frames to " << toCopy << " frames" << endl; -#endif - } - - for (int c = 0; c < channels; ++c) { - for (int i = 0; i < toCopy; ++i) { - tmp[i] = srcout[channels * i + c]; - } - RingBuffer<float> *wb = getWriteRingBuffer(c); - if (wb) wb->write(tmp, int(toCopy)); - } - - m_writeBufferFill = f; - if (readWriteEqual) m_readBufferFill = f; - - } else { - - // space must be a multiple of generatorBlockSize - sv_frame_t reqSpace = space; - space = (reqSpace / generatorBlockSize) * generatorBlockSize; - if (space == 0) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "requested fill of " << reqSpace - << " is less than generator block size of " - << generatorBlockSize << ", leaving it" << endl; -#endif - return false; - } - - if (tmpSize < channels * space) { - delete[] tmp; - tmp = new float[channels * space]; - tmpSize = channels * space; - } - - for (int c = 0; c < channels; ++c) { - - bufferPtrs[c] = tmp + c * space; - - for (int i = 0; i < space; ++i) { - tmp[c * space + i] = 0.0f; - } - } - - sv_frame_t got = mixModels(f, space, bufferPtrs); // also modifies f - - for (int c = 0; c < channels; ++c) { - - RingBuffer<float> *wb = getWriteRingBuffer(c); - if (wb) { - int actual = wb->write(bufferPtrs[c], int(got)); -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Wrote " << actual << " samples for ch " << c << ", now " - << wb->getReadSpace() << " to read" - << endl; -#endif - if (actual < got) { - cerr << "WARNING: Buffer overrun in channel " << c - << ": wrote " << actual << " of " << got - << " samples" << endl; - } - } - } - - m_writeBufferFill = f; - if (readWriteEqual) m_readBufferFill = f; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Read buffer fill is now " << m_readBufferFill << endl; -#endif - - //!!! how do we know when ended? 
need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples - } - - return true; -} - -sv_frame_t -AudioCallbackPlaySource::mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers) -{ - sv_frame_t processed = 0; - sv_frame_t chunkStart = frame; - sv_frame_t chunkSize = count; - sv_frame_t selectionSize = 0; - sv_frame_t nextChunkStart = chunkStart + chunkSize; - - bool looping = m_viewManager->getPlayLoopMode(); - bool constrained = (m_viewManager->getPlaySelectionMode() && - !m_viewManager->getSelections().empty()); - - static float **chunkBufferPtrs = 0; - static int chunkBufferPtrCount = 0; - int channels = getTargetChannelCount(); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << endl; -#endif - - if (chunkBufferPtrCount < channels) { - if (chunkBufferPtrs) delete[] chunkBufferPtrs; - chunkBufferPtrs = new float *[channels]; - chunkBufferPtrCount = channels; - } - - for (int c = 0; c < channels; ++c) { - chunkBufferPtrs[c] = buffers[c]; - } - - while (processed < count) { - - chunkSize = count - processed; - nextChunkStart = chunkStart + chunkSize; - selectionSize = 0; - - sv_frame_t fadeIn = 0, fadeOut = 0; - - if (constrained) { - - sv_frame_t rChunkStart = - m_viewManager->alignPlaybackFrameToReference(chunkStart); - - Selection selection = - m_viewManager->getContainingSelection(rChunkStart, true); - - if (selection.isEmpty()) { - if (looping) { - selection = *m_viewManager->getSelections().begin(); - chunkStart = m_viewManager->alignReferenceToPlaybackFrame - (selection.getStartFrame()); - fadeIn = 50; - } - } - - if (selection.isEmpty()) { - - chunkSize = 0; - nextChunkStart = chunkStart; - - } else { - - sv_frame_t sf = m_viewManager->alignReferenceToPlaybackFrame - (selection.getStartFrame()); - sv_frame_t ef = m_viewManager->alignReferenceToPlaybackFrame - (selection.getEndFrame()); - - selectionSize = ef - sf; - - if (chunkStart < sf) { - chunkStart = sf; - fadeIn = 50; - } - - nextChunkStart = chunkStart + chunkSize; - - if (nextChunkStart >= ef) { - nextChunkStart = ef; - fadeOut = 50; - } - - chunkSize = nextChunkStart - chunkStart; - } - - } else if (looping && m_lastModelEndFrame > 0) { - - if (chunkStart >= m_lastModelEndFrame) { - chunkStart = 0; - } - if (chunkSize > m_lastModelEndFrame - chunkStart) { - chunkSize = m_lastModelEndFrame - chunkStart; - } - nextChunkStart = chunkStart + chunkSize; - } - -// cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << endl; - - if (!chunkSize) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Ending selection playback at " << nextChunkStart << endl; -#endif - // We need to maintain full buffers so that the other - // thread can tell where it's got to in the playback -- so - // return the full amount here - frame = frame + count; - return count; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl; -#endif - - if (selectionSize < 100) { - fadeIn = 0; - fadeOut = 0; - } else if (selectionSize < 300) { - if (fadeIn > 0) fadeIn = 10; - if (fadeOut > 0) fadeOut = 10; - } - - if (fadeIn > 0) { - if (processed * 2 < fadeIn) { - fadeIn = processed * 2; - } - } - - if (fadeOut > 0) { - if ((count - processed - chunkSize) * 2 < fadeOut) { - fadeOut = (count - processed - 
chunkSize) * 2; - } - } - - for (std::set<Model *>::iterator mi = m_models.begin(); - mi != m_models.end(); ++mi) { - - (void) m_audioGenerator->mixModel(*mi, chunkStart, - chunkSize, chunkBufferPtrs, - fadeIn, fadeOut); - } - - for (int c = 0; c < channels; ++c) { - chunkBufferPtrs[c] += chunkSize; - } - - processed += chunkSize; - chunkStart = nextChunkStart; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << endl; -#endif - - frame = nextChunkStart; - return processed; -} - -void -AudioCallbackPlaySource::unifyRingBuffers() -{ - if (m_readBuffers == m_writeBuffers) return; - - // only unify if there will be something to read - for (int c = 0; c < getTargetChannelCount(); ++c) { - RingBuffer<float> *wb = getWriteRingBuffer(c); - if (wb) { - if (wb->getReadSpace() < m_blockSize * 2) { - if ((m_writeBufferFill + m_blockSize * 2) < - m_lastModelEndFrame) { - // OK, we don't have enough and there's more to - // read -- don't unify until we can do better -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: Not unifying: write buffer has less (" << wb->getReadSpace() << ") than " << m_blockSize*2 << " to read and write buffer fill (" << m_writeBufferFill << ") is not close to end frame (" << m_lastModelEndFrame << ")" << endl; -#endif - return; - } - } - break; - } - } - - sv_frame_t rf = m_readBufferFill; - RingBuffer<float> *rb = getReadRingBuffer(0); - if (rb) { - int rs = rb->getReadSpace(); - //!!! incorrect when in non-contiguous selection, see comments elsewhere -// cout << "rs = " << rs << endl; - if (rs < rf) rf -= rs; - else rf = 0; - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl; -#endif - - sv_frame_t wf = m_writeBufferFill; - sv_frame_t skip = 0; - for (int c = 0; c < getTargetChannelCount(); ++c) { - RingBuffer<float> *wb = getWriteRingBuffer(c); - if (wb) { - if (c == 0) { - - int wrs = wb->getReadSpace(); -// cout << "wrs = " << wrs << endl; - - if (wrs < wf) wf -= wrs; - else wf = 0; -// cout << "wf = " << wf << endl; - - if (wf < rf) skip = rf - wf; - if (skip == 0) break; - } - -// cout << "skipping " << skip << endl; - wb->skip(int(skip)); - } - } - - m_bufferScavenger.claim(m_readBuffers); - m_readBuffers = m_writeBuffers; - m_readBufferFill = m_writeBufferFill; -#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING - cerr << "unified" << endl; -#endif -} - -void -AudioCallbackPlaySource::FillThread::run() -{ - AudioCallbackPlaySource &s(m_source); - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread starting" << endl; -#endif - - s.m_mutex.lock(); - - bool previouslyPlaying = s.m_playing; - bool work = false; - - while (!s.m_exiting) { - - s.unifyRingBuffers(); - s.m_bufferScavenger.scavenge(); - s.m_pluginScavenger.scavenge(); - - if (work && s.m_playing && s.getSourceSampleRate()) { - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread: not waiting" << endl; -#endif - - s.m_mutex.unlock(); - s.m_mutex.lock(); - - } else { - - double ms = 100; - if (s.getSourceSampleRate() > 0) { - ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0; - } - - if (s.m_playing) ms /= 10; - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - if (!s.m_playing) cout << endl; - cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." 
<< endl; -#endif - - s.m_condition.wait(&s.m_mutex, int(ms)); - } - -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread: awoken" << endl; -#endif - - work = false; - - if (!s.getSourceSampleRate()) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread: source sample rate is zero" << endl; -#endif - continue; - } - - bool playing = s.m_playing; - - if (playing && !previouslyPlaying) { -#ifdef DEBUG_AUDIO_PLAY_SOURCE - cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl; -#endif - for (int c = 0; c < s.getTargetChannelCount(); ++c) { - RingBuffer<float> *rb = s.getReadRingBuffer(c); - if (rb) rb->reset(); - } - } - previouslyPlaying = playing; - - work = s.fillBuffers(); - } - - s.m_mutex.unlock(); -} -
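The core of the getCurrentFrame() estimate in the file removed above is one line of bookkeeping: start from the time up to which the fill thread has buffered, subtract everything still queued between there and the speakers, and add back the wall-clock time elapsed since the last block was handed to the target. A condensed sketch of just that arithmetic, with hypothetical local names; the play-range and looping adjustments that follow it in the real code are omitted:

// All quantities in seconds. When a time stretcher is active, the terms
// expressed at the output rate are divided by the stretch ratio first,
// as in the removed code.
double estimatePlayingTime(double bufferedTo,       // source time the fill thread has reached
                           double inBuffer,         // audio still sitting in the ring buffers
                           double lastRetrieved,    // duration of the last block given to the target
                           double sinceRequest,     // time elapsed since that block was requested
                           double outputLatency,    // target device playback latency
                           double stretcherLatency, // time stretcher latency, if any
                           double timeRatio)        // 1.0 when not stretching
{
    if (timeRatio != 1.0) {
        lastRetrieved /= timeRatio;
        sinceRequest  /= timeRatio;
        outputLatency /= timeRatio;
    }
    double playing = bufferedTo
        - outputLatency - stretcherLatency - lastRetrieved - inBuffer
        + sinceRequest;
    return playing < 0.0 ? 0.0 : playing;
}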
--- a/audioio/AudioCallbackPlaySource.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,384 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam and QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_CALLBACK_PLAY_SOURCE_H_ -#define _AUDIO_CALLBACK_PLAY_SOURCE_H_ - -#include "base/RingBuffer.h" -#include "base/AudioPlaySource.h" -#include "base/PropertyContainer.h" -#include "base/Scavenger.h" - -#include <QObject> -#include <QMutex> -#include <QWaitCondition> - -#include "base/Thread.h" -#include "base/RealTime.h" - -#include <samplerate.h> - -#include <set> -#include <map> - -namespace RubberBand { - class RubberBandStretcher; -} - -class Model; -class ViewManagerBase; -class AudioGenerator; -class PlayParameters; -class RealTimePluginInstance; -class AudioCallbackPlayTarget; - -/** - * AudioCallbackPlaySource manages audio data supply to callback-based - * audio APIs such as JACK or CoreAudio. It maintains one ring buffer - * per channel, filled during playback by a non-realtime thread, and - * provides a method for a realtime thread to pick up the latest - * available sample data from these buffers. - */ -class AudioCallbackPlaySource : public QObject, - public AudioPlaySource -{ - Q_OBJECT - -public: - AudioCallbackPlaySource(ViewManagerBase *, QString clientName); - virtual ~AudioCallbackPlaySource(); - - /** - * Add a data model to be played from. The source can mix - * playback from a number of sources including dense and sparse - * models. The models must match in sample rate, but they don't - * have to have identical numbers of channels. - */ - virtual void addModel(Model *model); - - /** - * Remove a model. - */ - virtual void removeModel(Model *model); - - /** - * Remove all models. (Silence will ensue.) - */ - virtual void clearModels(); - - /** - * Start making data available in the ring buffers for playback, - * from the given frame. If playback is already under way, reseek - * to the given frame and continue. - */ - virtual void play(sv_frame_t startFrame); - - /** - * Stop playback and ensure that no more data is returned. - */ - virtual void stop(); - - /** - * Return whether playback is currently supposed to be happening. - */ - virtual bool isPlaying() const { return m_playing; } - - /** - * Return the frame number that is currently expected to be coming - * out of the speakers. (i.e. compensating for playback latency.) - */ - virtual sv_frame_t getCurrentPlayingFrame(); - - /** - * Return the last frame that would come out of the speakers if we - * stopped playback right now. - */ - virtual sv_frame_t getCurrentBufferedFrame(); - - /** - * Return the frame at which playback is expected to end (if not looping). - */ - virtual sv_frame_t getPlayEndFrame() { return m_lastModelEndFrame; } - - /** - * Set the target and the block size of the target audio device. - * This should be called by the target class. - */ - void setTarget(AudioCallbackPlayTarget *, int blockSize); - - /** - * Get the block size of the target audio device. 
This may be an - * estimate or upper bound, if the target has a variable block - * size; the source should behave itself even if this value turns - * out to be inaccurate. - */ - int getTargetBlockSize() const; - - /** - * Set the playback latency of the target audio device, in frames - * at the target sample rate. This is the difference between the - * frame currently "leaving the speakers" and the last frame (or - * highest last frame across all channels) requested via - * getSamples(). The default is zero. - */ - void setTargetPlayLatency(sv_frame_t); - - /** - * Get the playback latency of the target audio device. - */ - sv_frame_t getTargetPlayLatency() const; - - /** - * Specify that the target audio device has a fixed sample rate - * (i.e. cannot accommodate arbitrary sample rates based on the - * source). If the target sets this to something other than the - * source sample rate, this class will resample automatically to - * fit. - */ - void setTargetSampleRate(sv_samplerate_t); - - /** - * Return the sample rate set by the target audio device (or the - * source sample rate if the target hasn't set one). - */ - virtual sv_samplerate_t getTargetSampleRate() const; - - /** - * Set the current output levels for metering (for call from the - * target) - */ - void setOutputLevels(float left, float right); - - /** - * Return the current (or thereabouts) output levels in the range - * 0.0 -> 1.0, for metering purposes. - */ - virtual bool getOutputLevels(float &left, float &right); - - /** - * Get the number of channels of audio that in the source models. - * This may safely be called from a realtime thread. Returns 0 if - * there is no source yet available. - */ - int getSourceChannelCount() const; - - /** - * Get the number of channels of audio that will be provided - * to the play target. This may be more than the source channel - * count: for example, a mono source will provide 2 channels - * after pan. - * This may safely be called from a realtime thread. Returns 0 if - * there is no source yet available. - */ - int getTargetChannelCount() const; - - /** - * Get the actual sample rate of the source material. This may - * safely be called from a realtime thread. Returns 0 if there is - * no source yet available. - */ - virtual sv_samplerate_t getSourceSampleRate() const; - - /** - * Get "count" samples (at the target sample rate) of the mixed - * audio data, in all channels. This may safely be called from a - * realtime thread. - */ - sv_frame_t getSourceSamples(sv_frame_t count, float **buffer); - - /** - * Set the time stretcher factor (i.e. playback speed). - */ - void setTimeStretch(double factor); - - /** - * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is - * highest quality. - */ - void setResampleQuality(int q); - - /** - * Set a single real-time plugin as a processing effect for - * auditioning during playback. - * - * The plugin must have been initialised with - * getTargetChannelCount() channels and a getTargetBlockSize() - * sample frame processing block size. - * - * This playback source takes ownership of the plugin, which will - * be deleted at some point after the following call to - * setAuditioningEffect (depending on real-time constraints). - * - * Pass a null pointer to remove the current auditioning plugin, - * if any. - */ - void setAuditioningEffect(Auditionable *plugin); - - /** - * Specify that only the given set of models should be played. 
- */ - void setSoloModelSet(std::set<Model *>s); - - /** - * Specify that all models should be played as normal (if not - * muted). - */ - void clearSoloModelSet(); - - QString getClientName() const { return m_clientName; } - -signals: - void modelReplaced(); - - void playStatusChanged(bool isPlaying); - - void sampleRateMismatch(sv_samplerate_t requested, - sv_samplerate_t available, - bool willResample); - - void audioOverloadPluginDisabled(); - void audioTimeStretchMultiChannelDisabled(); - - void activity(QString); - -public slots: - void audioProcessingOverload(); - -protected slots: - void selectionChanged(); - void playLoopModeChanged(); - void playSelectionModeChanged(); - void playParametersChanged(PlayParameters *); - void preferenceChanged(PropertyContainer::PropertyName); - void modelChangedWithin(sv_frame_t startFrame, sv_frame_t endFrame); - -protected: - ViewManagerBase *m_viewManager; - AudioGenerator *m_audioGenerator; - QString m_clientName; - - class RingBufferVector : public std::vector<RingBuffer<float> *> { - public: - virtual ~RingBufferVector() { - while (!empty()) { - delete *begin(); - erase(begin()); - } - } - }; - - std::set<Model *> m_models; - RingBufferVector *m_readBuffers; - RingBufferVector *m_writeBuffers; - sv_frame_t m_readBufferFill; - sv_frame_t m_writeBufferFill; - Scavenger<RingBufferVector> m_bufferScavenger; - int m_sourceChannelCount; - sv_frame_t m_blockSize; - sv_samplerate_t m_sourceSampleRate; - sv_samplerate_t m_targetSampleRate; - sv_frame_t m_playLatency; - AudioCallbackPlayTarget *m_target; - double m_lastRetrievalTimestamp; - sv_frame_t m_lastRetrievedBlockSize; - bool m_trustworthyTimestamps; - sv_frame_t m_lastCurrentFrame; - bool m_playing; - bool m_exiting; - sv_frame_t m_lastModelEndFrame; - int m_ringBufferSize; - float m_outputLeft; - float m_outputRight; - RealTimePluginInstance *m_auditioningPlugin; - bool m_auditioningPluginBypassed; - Scavenger<RealTimePluginInstance> m_pluginScavenger; - sv_frame_t m_playStartFrame; - bool m_playStartFramePassed; - RealTime m_playStartedAt; - - RingBuffer<float> *getWriteRingBuffer(int c) { - if (m_writeBuffers && c < (int)m_writeBuffers->size()) { - return (*m_writeBuffers)[c]; - } else { - return 0; - } - } - - RingBuffer<float> *getReadRingBuffer(int c) { - RingBufferVector *rb = m_readBuffers; - if (rb && c < (int)rb->size()) { - return (*rb)[c]; - } else { - return 0; - } - } - - void clearRingBuffers(bool haveLock = false, int count = 0); - void unifyRingBuffers(); - - RubberBand::RubberBandStretcher *m_timeStretcher; - RubberBand::RubberBandStretcher *m_monoStretcher; - double m_stretchRatio; - bool m_stretchMono; - - int m_stretcherInputCount; - float **m_stretcherInputs; - sv_frame_t *m_stretcherInputSizes; - - // Called from fill thread, m_playing true, mutex held - // Return true if work done - bool fillBuffers(); - - // Called from fillBuffers. Return the number of frames written, - // which will be count or fewer. Return in the frame argument the - // new buffered frame position (which may be earlier than the - // frame argument passed in, in the case of looping). - sv_frame_t mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers); - - // Called from getSourceSamples. 
- void applyAuditioningEffect(sv_frame_t count, float **buffers); - - // Ranges of current selections, if play selection is active - std::vector<RealTime> m_rangeStarts; - std::vector<RealTime> m_rangeDurations; - void rebuildRangeLists(); - - sv_frame_t getCurrentFrame(RealTime outputLatency); - - class FillThread : public Thread - { - public: - FillThread(AudioCallbackPlaySource &source) : - Thread(Thread::NonRTThread), - m_source(source) { } - - virtual void run(); - - protected: - AudioCallbackPlaySource &m_source; - }; - - QMutex m_mutex; - QWaitCondition m_condition; - FillThread *m_fillThread; - SRC_STATE *m_converter; - SRC_STATE *m_crapConverter; // for use when playing very fast - int m_resampleQuality; - void initialiseConverter(); -}; - -#endif - -
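[Note on the removed header above] Its class comment describes the design being replaced in this merge: one ring buffer per channel, filled by a non-realtime thread and drained from the audio callback via getSourceSamples(). As a rough illustration of that producer/consumer split only (this is not the project's base/RingBuffer.h API, just the general pattern), a minimal lock-free single-producer/single-consumer buffer might look like this:

    // Illustrative sketch: a single-producer/single-consumer float ring buffer.
    // The fill thread calls write(); the realtime callback calls read(), which
    // never blocks or allocates. One slot is left empty to distinguish full
    // from empty.
    #include <atomic>
    #include <vector>
    #include <algorithm>

    class SpscRing {
    public:
        explicit SpscRing(size_t size) : m_data(size), m_read(0), m_write(0) { }

        // Non-realtime fill thread.
        size_t write(const float *src, size_t n) {
            size_t r = m_read.load(std::memory_order_acquire);
            size_t w = m_write.load(std::memory_order_relaxed);
            size_t space = m_data.size() - 1 - used(r, w);
            n = std::min(n, space);
            for (size_t i = 0; i < n; ++i) m_data[(w + i) % m_data.size()] = src[i];
            m_write.store((w + n) % m_data.size(), std::memory_order_release);
            return n;
        }

        // Realtime audio callback.
        size_t read(float *dst, size_t n) {
            size_t w = m_write.load(std::memory_order_acquire);
            size_t r = m_read.load(std::memory_order_relaxed);
            n = std::min(n, used(r, w));
            for (size_t i = 0; i < n; ++i) dst[i] = m_data[(r + i) % m_data.size()];
            m_read.store((r + n) % m_data.size(), std::memory_order_release);
            return n;
        }

    private:
        size_t used(size_t r, size_t w) const {
            return (w >= r) ? (w - r) : (m_data.size() - (r - w));
        }
        std::vector<float> m_data;
        std::atomic<size_t> m_read, m_write;
    };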
--- a/audioio/AudioCallbackPlayTarget.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "AudioCallbackPlayTarget.h" -#include "AudioCallbackPlaySource.h" - -#include <iostream> - -AudioCallbackPlayTarget::AudioCallbackPlayTarget(AudioCallbackPlaySource *source) : - m_source(source), - m_outputGain(1.0) -{ - if (m_source) { - connect(m_source, SIGNAL(modelReplaced()), - this, SLOT(sourceModelReplaced())); - } -} - -AudioCallbackPlayTarget::~AudioCallbackPlayTarget() -{ -} - -void -AudioCallbackPlayTarget::setOutputGain(float gain) -{ - m_outputGain = gain; -} -
--- a/audioio/AudioCallbackPlayTarget.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,63 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_CALLBACK_PLAY_TARGET_H_ -#define _AUDIO_CALLBACK_PLAY_TARGET_H_ - -#include <QObject> - -class AudioCallbackPlaySource; - -class AudioCallbackPlayTarget : public QObject -{ - Q_OBJECT - -public: - AudioCallbackPlayTarget(AudioCallbackPlaySource *source); - virtual ~AudioCallbackPlayTarget(); - - virtual bool isOK() const = 0; - - virtual void shutdown() = 0; - - virtual double getCurrentTime() const = 0; - - float getOutputGain() const { - return m_outputGain; - } - -public slots: - /** - * Set the playback gain (0.0 = silence, 1.0 = levels unmodified) - */ - virtual void setOutputGain(float gain); - - /** - * The main source model (providing the playback sample rate) has - * been changed. The target should query the source's sample - * rate, set its output sample rate accordingly, and call back on - * the source's setTargetSampleRate to indicate what sample rate - * it succeeded in setting at the output. If this differs from - * the model rate, the source will resample. - */ - virtual void sourceModelReplaced() = 0; - -protected: - AudioCallbackPlaySource *m_source; - float m_outputGain; -}; - -#endif -
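[Note] The sourceModelReplaced() comment in the removed header above describes a sample-rate handshake: the target queries the source's rate, opens the device as close to it as it can, and reports back the rate it actually achieved so the source can resample. A trivial standalone sketch of that logic (the device-opening function and the rates are made up; only the handshake shape comes from the header):

    #include <cstdio>

    static double openOutputDevice(double requestedRate) {
        (void)requestedRate;
        return 48000.0;   // pretend the soundcard insists on 48 kHz
    }

    int main() {
        double sourceRate = 44100.0;                  // rate of the main model
        double achievedRate = openOutputDevice(sourceRate);

        // Per the header comment: report the achieved rate back to the source;
        // if it differs from the model rate, the source resamples to fit.
        if (achievedRate != sourceRate) {
            std::printf("device runs at %.0f Hz, source resamples %.0f -> %.0f\n",
                        achievedRate, sourceRate, achievedRate);
        } else {
            std::printf("device matches source rate %.0f Hz\n", sourceRate);
        }
        return 0;
    }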
--- a/audioio/AudioGenerator.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,709 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "AudioGenerator.h" - -#include "base/TempDirectory.h" -#include "base/PlayParameters.h" -#include "base/PlayParameterRepository.h" -#include "base/Pitch.h" -#include "base/Exceptions.h" - -#include "data/model/NoteModel.h" -#include "data/model/FlexiNoteModel.h" -#include "data/model/DenseTimeValueModel.h" -#include "data/model/SparseTimeValueModel.h" -#include "data/model/SparseOneDimensionalModel.h" -#include "data/model/NoteData.h" - -#include "ClipMixer.h" -#include "ContinuousSynth.h" - -#include <iostream> -#include <cmath> - -#include <QDir> -#include <QFile> - -const sv_frame_t -AudioGenerator::m_processingBlockSize = 1024; - -QString -AudioGenerator::m_sampleDir = ""; - -//#define DEBUG_AUDIO_GENERATOR 1 - -AudioGenerator::AudioGenerator() : - m_sourceSampleRate(0), - m_targetChannelCount(1), - m_waveType(0), - m_soloing(false), - m_channelBuffer(0), - m_channelBufSiz(0), - m_channelBufCount(0) -{ - initialiseSampleDir(); - - connect(PlayParameterRepository::getInstance(), - SIGNAL(playClipIdChanged(const Playable *, QString)), - this, - SLOT(playClipIdChanged(const Playable *, QString))); -} - -AudioGenerator::~AudioGenerator() -{ -#ifdef DEBUG_AUDIO_GENERATOR - SVDEBUG << "AudioGenerator::~AudioGenerator" << endl; -#endif -} - -void -AudioGenerator::initialiseSampleDir() -{ - if (m_sampleDir != "") return; - - try { - m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples"); - } catch (DirectoryCreationFailed f) { - cerr << "WARNING: AudioGenerator::initialiseSampleDir:" - << " Failed to create temporary sample directory" - << endl; - m_sampleDir = ""; - return; - } - - QDir sampleResourceDir(":/samples", "*.wav"); - - for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) { - - QString fileName(sampleResourceDir[i]); - QFile file(sampleResourceDir.filePath(fileName)); - QString target = QDir(m_sampleDir).filePath(fileName); - - if (!file.copy(target)) { - cerr << "WARNING: AudioGenerator::getSampleDir: " - << "Unable to copy " << fileName - << " into temporary directory \"" - << m_sampleDir << "\"" << endl; - } else { - QFile tf(target); - tf.setPermissions(tf.permissions() | - QFile::WriteOwner | - QFile::WriteUser); - } - } -} - -bool -AudioGenerator::addModel(Model *model) -{ - if (m_sourceSampleRate == 0) { - - m_sourceSampleRate = model->getSampleRate(); - - } else { - - DenseTimeValueModel *dtvm = - dynamic_cast<DenseTimeValueModel *>(model); - - if (dtvm) { - m_sourceSampleRate = model->getSampleRate(); - return true; - } - } - - const Playable *playable = model; - if (!playable || !playable->canPlay()) return 0; - - PlayParameters *parameters = - PlayParameterRepository::getInstance()->getPlayParameters(playable); - - bool willPlay = !parameters->isPlayMuted(); - - if (usesClipMixer(model)) { - ClipMixer *mixer = makeClipMixerFor(model); 
- if (mixer) { - QMutexLocker locker(&m_mutex); - m_clipMixerMap[model] = mixer; - return willPlay; - } - } - - if (usesContinuousSynth(model)) { - ContinuousSynth *synth = makeSynthFor(model); - if (synth) { - QMutexLocker locker(&m_mutex); - m_continuousSynthMap[model] = synth; - return willPlay; - } - } - - return false; -} - -void -AudioGenerator::playClipIdChanged(const Playable *playable, QString) -{ - const Model *model = dynamic_cast<const Model *>(playable); - if (!model) { - cerr << "WARNING: AudioGenerator::playClipIdChanged: playable " - << playable << " is not a supported model type" - << endl; - return; - } - - if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return; - - ClipMixer *mixer = makeClipMixerFor(model); - if (mixer) { - QMutexLocker locker(&m_mutex); - m_clipMixerMap[model] = mixer; - } -} - -bool -AudioGenerator::usesClipMixer(const Model *model) -{ - bool clip = - (qobject_cast<const SparseOneDimensionalModel *>(model) || - qobject_cast<const NoteModel *>(model) || - qobject_cast<const FlexiNoteModel *>(model)); - return clip; -} - -bool -AudioGenerator::wantsQuieterClips(const Model *model) -{ - // basically, anything that usually has sustain (like notes) or - // often has multiple sounds at once (like notes) wants to use a - // quieter level than simple click tracks - bool does = - (qobject_cast<const NoteModel *>(model) || - qobject_cast<const FlexiNoteModel *>(model)); - return does; -} - -bool -AudioGenerator::usesContinuousSynth(const Model *model) -{ - bool cont = - (qobject_cast<const SparseTimeValueModel *>(model)); - return cont; -} - -ClipMixer * -AudioGenerator::makeClipMixerFor(const Model *model) -{ - QString clipId; - - const Playable *playable = model; - if (!playable || !playable->canPlay()) return 0; - - PlayParameters *parameters = - PlayParameterRepository::getInstance()->getPlayParameters(playable); - if (parameters) { - clipId = parameters->getPlayClipId(); - } - -#ifdef DEBUG_AUDIO_GENERATOR - std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl; -#endif - - if (clipId == "") { - SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl; - return 0; - } - - ClipMixer *mixer = new ClipMixer(m_targetChannelCount, - m_sourceSampleRate, - m_processingBlockSize); - - double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required - - QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId); - - double level = wantsQuieterClips(model) ? 
0.5 : 1.0; - if (!mixer->loadClipData(clipPath, clipF0, level)) { - delete mixer; - return 0; - } - -#ifdef DEBUG_AUDIO_GENERATOR - std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl; -#endif - - return mixer; -} - -ContinuousSynth * -AudioGenerator::makeSynthFor(const Model *model) -{ - const Playable *playable = model; - if (!playable || !playable->canPlay()) return 0; - - ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount, - m_sourceSampleRate, - m_processingBlockSize, - m_waveType); - -#ifdef DEBUG_AUDIO_GENERATOR - std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl; -#endif - - return synth; -} - -void -AudioGenerator::removeModel(Model *model) -{ - SparseOneDimensionalModel *sodm = - dynamic_cast<SparseOneDimensionalModel *>(model); - if (!sodm) return; // nothing to do - - QMutexLocker locker(&m_mutex); - - if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return; - - ClipMixer *mixer = m_clipMixerMap[sodm]; - m_clipMixerMap.erase(sodm); - delete mixer; -} - -void -AudioGenerator::clearModels() -{ - QMutexLocker locker(&m_mutex); - - while (!m_clipMixerMap.empty()) { - ClipMixer *mixer = m_clipMixerMap.begin()->second; - m_clipMixerMap.erase(m_clipMixerMap.begin()); - delete mixer; - } -} - -void -AudioGenerator::reset() -{ - QMutexLocker locker(&m_mutex); - -#ifdef DEBUG_AUDIO_GENERATOR - cerr << "AudioGenerator::reset()" << endl; -#endif - - for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { - if (i->second) { - i->second->reset(); - } - } - - m_noteOffs.clear(); -} - -void -AudioGenerator::setTargetChannelCount(int targetChannelCount) -{ - if (m_targetChannelCount == targetChannelCount) return; - -// SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl; - - QMutexLocker locker(&m_mutex); - m_targetChannelCount = targetChannelCount; - - for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) { - if (i->second) i->second->setChannelCount(targetChannelCount); - } -} - -sv_frame_t -AudioGenerator::getBlockSize() const -{ - return m_processingBlockSize; -} - -void -AudioGenerator::setSoloModelSet(std::set<Model *> s) -{ - QMutexLocker locker(&m_mutex); - - m_soloModelSet = s; - m_soloing = true; -} - -void -AudioGenerator::clearSoloModelSet() -{ - QMutexLocker locker(&m_mutex); - - m_soloModelSet.clear(); - m_soloing = false; -} - -sv_frame_t -AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, - float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut) -{ - if (m_sourceSampleRate == 0) { - cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; - return frameCount; - } - - QMutexLocker locker(&m_mutex); - - Playable *playable = model; - if (!playable || !playable->canPlay()) return frameCount; - - PlayParameters *parameters = - PlayParameterRepository::getInstance()->getPlayParameters(playable); - if (!parameters) return frameCount; - - bool playing = !parameters->isPlayMuted(); - if (!playing) { -#ifdef DEBUG_AUDIO_GENERATOR - cout << "AudioGenerator::mixModel(" << model << "): muted" << endl; -#endif - return frameCount; - } - - if (m_soloing) { - if (m_soloModelSet.find(model) == m_soloModelSet.end()) { -#ifdef DEBUG_AUDIO_GENERATOR - cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl; -#endif - return frameCount; - } - } - - float gain = parameters->getPlayGain(); - 
float pan = parameters->getPlayPan(); - - DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model); - if (dtvm) { - return mixDenseTimeValueModel(dtvm, startFrame, frameCount, - buffer, gain, pan, fadeIn, fadeOut); - } - - if (usesClipMixer(model)) { - return mixClipModel(model, startFrame, frameCount, - buffer, gain, pan); - } - - if (usesContinuousSynth(model)) { - return mixContinuousSynthModel(model, startFrame, frameCount, - buffer, gain, pan); - } - - std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl; - - return frameCount; -} - -sv_frame_t -AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, - sv_frame_t startFrame, sv_frame_t frames, - float **buffer, float gain, float pan, - sv_frame_t fadeIn, sv_frame_t fadeOut) -{ - sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut); - - int modelChannels = dtvm->getChannelCount(); - - if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) { - - for (int c = 0; c < m_channelBufCount; ++c) { - delete[] m_channelBuffer[c]; - } - - delete[] m_channelBuffer; - m_channelBuffer = new float *[modelChannels]; - - for (int c = 0; c < modelChannels; ++c) { - m_channelBuffer[c] = new float[maxFrames]; - } - - m_channelBufCount = modelChannels; - m_channelBufSiz = maxFrames; - } - - sv_frame_t got = 0; - - if (startFrame >= fadeIn/2) { - got = dtvm->getMultiChannelData(0, modelChannels - 1, - startFrame - fadeIn/2, - frames + fadeOut/2 + fadeIn/2, - m_channelBuffer); - } else { - sv_frame_t missing = fadeIn/2 - startFrame; - - for (int c = 0; c < modelChannels; ++c) { - m_channelBuffer[c] += missing; - } - - if (missing > 0) { - cerr << "note: channelBufSiz = " << m_channelBufSiz - << ", frames + fadeOut/2 = " << frames + fadeOut/2 - << ", startFrame = " << startFrame - << ", missing = " << missing << endl; - } - - got = dtvm->getMultiChannelData(0, modelChannels - 1, - startFrame, - frames + fadeOut/2, - m_channelBuffer); - - for (int c = 0; c < modelChannels; ++c) { - m_channelBuffer[c] -= missing; - } - - got += missing; - } - - for (int c = 0; c < m_targetChannelCount; ++c) { - - int sourceChannel = (c % modelChannels); - -// SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; - - float channelGain = gain; - if (pan != 0.0) { - if (c == 0) { - if (pan > 0.0) channelGain *= 1.0f - pan; - } else { - if (pan < 0.0) channelGain *= pan + 1.0f; - } - } - - for (sv_frame_t i = 0; i < fadeIn/2; ++i) { - float *back = buffer[c]; - back -= fadeIn/2; - back[i] += - (channelGain * m_channelBuffer[sourceChannel][i] * float(i)) - / float(fadeIn); - } - - for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) { - float mult = channelGain; - if (i < fadeIn/2) { - mult = (mult * float(i)) / float(fadeIn); - } - if (i > frames - fadeOut/2) { - mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut); - } - float val = m_channelBuffer[sourceChannel][i]; - if (i >= got) val = 0.f; - buffer[c][i] += mult * val; - } - } - - return got; -} - -sv_frame_t -AudioGenerator::mixClipModel(Model *model, - sv_frame_t startFrame, sv_frame_t frames, - float **buffer, float gain, float pan) -{ - ClipMixer *clipMixer = m_clipMixerMap[model]; - if (!clipMixer) return 0; - - int blocks = int(frames / m_processingBlockSize); - - //!!! todo: the below -- it matters - - //!!! 
hang on -- the fact that the audio callback play source's - //buffer is a multiple of the plugin's buffer size doesn't mean - //that we always get called for a multiple of it here (because it - //also depends on the JACK block size). how should we ensure that - //all models write the same amount in to the mix, and that we - //always have a multiple of the plugin buffer size? I guess this - //class has to be queryable for the plugin buffer size & the - //callback play source has to use that as a multiple for all the - //calls to mixModel - - sv_frame_t got = blocks * m_processingBlockSize; - -#ifdef DEBUG_AUDIO_GENERATOR - cout << "mixModel [clip]: start " << startFrame << ", frames " << frames - << ", blocks " << blocks << ", have " << m_noteOffs.size() - << " note-offs" << endl; -#endif - - ClipMixer::NoteStart on; - ClipMixer::NoteEnd off; - - NoteOffSet ¬eOffs = m_noteOffs[model]; - - float **bufferIndexes = new float *[m_targetChannelCount]; - - for (int i = 0; i < blocks; ++i) { - - sv_frame_t reqStart = startFrame + i * m_processingBlockSize; - - NoteList notes; - NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); - if (exportable) { - notes = exportable->getNotesWithin(reqStart, - reqStart + m_processingBlockSize); - } - - std::vector<ClipMixer::NoteStart> starts; - std::vector<ClipMixer::NoteEnd> ends; - - for (NoteList::const_iterator ni = notes.begin(); - ni != notes.end(); ++ni) { - - sv_frame_t noteFrame = ni->start; - - if (noteFrame < reqStart || - noteFrame >= reqStart + m_processingBlockSize) continue; - - while (noteOffs.begin() != noteOffs.end() && - noteOffs.begin()->frame <= noteFrame) { - - sv_frame_t eventFrame = noteOffs.begin()->frame; - if (eventFrame < reqStart) eventFrame = reqStart; - - off.frameOffset = eventFrame - reqStart; - off.frequency = noteOffs.begin()->frequency; - -#ifdef DEBUG_AUDIO_GENERATOR - cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl; -#endif - - ends.push_back(off); - noteOffs.erase(noteOffs.begin()); - } - - on.frameOffset = noteFrame - reqStart; - on.frequency = ni->getFrequency(); - on.level = float(ni->velocity) / 127.0f; - on.pan = pan; - -#ifdef DEBUG_AUDIO_GENERATOR - cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl; -#endif - - starts.push_back(on); - noteOffs.insert - (NoteOff(on.frequency, noteFrame + ni->duration)); - } - - while (noteOffs.begin() != noteOffs.end() && - noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { - - sv_frame_t eventFrame = noteOffs.begin()->frame; - if (eventFrame < reqStart) eventFrame = reqStart; - - off.frameOffset = eventFrame - reqStart; - off.frequency = noteOffs.begin()->frequency; - -#ifdef DEBUG_AUDIO_GENERATOR - cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl; -#endif - - ends.push_back(off); - noteOffs.erase(noteOffs.begin()); - } - - for (int c = 0; c < m_targetChannelCount; ++c) { - bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; - } - - clipMixer->mix(bufferIndexes, gain, starts, ends); - } - - delete[] bufferIndexes; - - return got; -} - -sv_frame_t -AudioGenerator::mixContinuousSynthModel(Model *model, - sv_frame_t startFrame, - sv_frame_t frames, - float **buffer, - float gain, - float pan) -{ - ContinuousSynth *synth = 
m_continuousSynthMap[model]; - if (!synth) return 0; - - // only type we support here at the moment - SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); - if (stvm->getScaleUnits() != "Hz") return 0; - - int blocks = int(frames / m_processingBlockSize); - - //!!! todo: see comment in mixClipModel - - sv_frame_t got = blocks * m_processingBlockSize; - -#ifdef DEBUG_AUDIO_GENERATOR - cout << "mixModel [synth]: frames " << frames - << ", blocks " << blocks << endl; -#endif - - float **bufferIndexes = new float *[m_targetChannelCount]; - - for (int i = 0; i < blocks; ++i) { - - sv_frame_t reqStart = startFrame + i * m_processingBlockSize; - - for (int c = 0; c < m_targetChannelCount; ++c) { - bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; - } - - SparseTimeValueModel::PointList points = - stvm->getPoints(reqStart, reqStart + m_processingBlockSize); - - // by default, repeat last frequency - float f0 = 0.f; - - // go straight to the last freq that is genuinely in this range - for (SparseTimeValueModel::PointList::const_iterator itr = points.end(); - itr != points.begin(); ) { - --itr; - if (itr->frame >= reqStart && - itr->frame < reqStart + m_processingBlockSize) { - f0 = itr->value; - break; - } - } - - // if we found no such frequency and the next point is further - // away than twice the model resolution, go silent (same - // criterion TimeValueLayer uses for ending a discrete curve - // segment) - if (f0 == 0.f) { - SparseTimeValueModel::PointList nextPoints = - stvm->getNextPoints(reqStart + m_processingBlockSize); - if (nextPoints.empty() || - nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) { - f0 = -1.f; - } - } - -// cerr << "f0 = " << f0 << endl; - - synth->mix(bufferIndexes, - gain, - pan, - f0); - } - - delete[] bufferIndexes; - - return got; -} -
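[Note] Two small pieces of the removed AudioGenerator.cpp above, spelled out as a check. makeClipMixerFor() asks for the frequency of MIDI note 60 relative to A440; in equal temperament that is f = 440 * 2^((m - 69)/12), so note 60 gives about 261.63 Hz (middle C). mixDenseTimeValueModel() then scales each output channel by the simple pan law visible in the code. Sketch only; the real Pitch::getFrequencyForPitch also takes a cents offset.

    #include <cmath>
    #include <cstdio>

    static double midiToFrequency(int midi, double concertA = 440.0) {
        return concertA * std::pow(2.0, (midi - 69) / 12.0);
    }

    // pan in [-1, 1]: -1 = hard left, 0 = centre, +1 = hard right,
    // matching the branches in mixDenseTimeValueModel above.
    static float channelGainFor(int channel, float gain, float pan) {
        if (pan != 0.f) {
            if (channel == 0) {                    // left
                if (pan > 0.f) gain *= 1.f - pan;
            } else {                               // right
                if (pan < 0.f) gain *= pan + 1.f;
            }
        }
        return gain;
    }

    int main() {
        std::printf("note 60 -> %.2f Hz\n", midiToFrequency(60));                 // ~261.63
        std::printf("left gain at pan +0.5:  %.2f\n", channelGainFor(0, 1.f, 0.5f)); // 0.50
        std::printf("right gain at pan +0.5: %.2f\n", channelGainFor(1, 1.f, 0.5f)); // 1.00
        return 0;
    }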
--- a/audioio/AudioGenerator.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,168 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_GENERATOR_H_ -#define _AUDIO_GENERATOR_H_ - -class Model; -class NoteModel; -class FlexiNoteModel; -class DenseTimeValueModel; -class SparseOneDimensionalModel; -class Playable; -class ClipMixer; -class ContinuousSynth; - -#include <QObject> -#include <QMutex> - -#include <set> -#include <map> -#include <vector> - -#include "base/BaseTypes.h" - -class AudioGenerator : public QObject -{ - Q_OBJECT - -public: - AudioGenerator(); - virtual ~AudioGenerator(); - - /** - * Add a data model to be played from and initialise any necessary - * audio generation code. Returns true if the model will be - * played. The model will be added regardless of the return - * value. - */ - virtual bool addModel(Model *model); - - /** - * Remove a model. - */ - virtual void removeModel(Model *model); - - /** - * Remove all models. - */ - virtual void clearModels(); - - /** - * Reset playback, clearing buffers and the like. - */ - virtual void reset(); - - /** - * Set the target channel count. The buffer parameter to mixModel - * must always point to at least this number of arrays. - */ - virtual void setTargetChannelCount(int channelCount); - - /** - * Return the internal processing block size. The frameCount - * argument to all mixModel calls must be a multiple of this - * value. - */ - virtual sv_frame_t getBlockSize() const; - - /** - * Mix a single model into an output buffer. - */ - virtual sv_frame_t mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount, - float **buffer, sv_frame_t fadeIn = 0, sv_frame_t fadeOut = 0); - - /** - * Specify that only the given set of models should be played. - */ - virtual void setSoloModelSet(std::set<Model *>s); - - /** - * Specify that all models should be played as normal (if not - * muted). 
- */ - virtual void clearSoloModelSet(); - -protected slots: - void playClipIdChanged(const Playable *, QString); - -protected: - sv_samplerate_t m_sourceSampleRate; - int m_targetChannelCount; - int m_waveType; - - bool m_soloing; - std::set<Model *> m_soloModelSet; - - struct NoteOff { - - NoteOff(float _freq, sv_frame_t _frame) : frequency(_freq), frame(_frame) { } - - float frequency; - sv_frame_t frame; - - struct Comparator { - bool operator()(const NoteOff &n1, const NoteOff &n2) const { - return n1.frame < n2.frame; - } - }; - }; - - - typedef std::map<const Model *, ClipMixer *> ClipMixerMap; - - typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet; - typedef std::map<const Model *, NoteOffSet> NoteOffMap; - - typedef std::map<const Model *, ContinuousSynth *> ContinuousSynthMap; - - QMutex m_mutex; - - ClipMixerMap m_clipMixerMap; - NoteOffMap m_noteOffs; - static QString m_sampleDir; - - ContinuousSynthMap m_continuousSynthMap; - - bool usesClipMixer(const Model *); - bool wantsQuieterClips(const Model *); - bool usesContinuousSynth(const Model *); - - ClipMixer *makeClipMixerFor(const Model *model); - ContinuousSynth *makeSynthFor(const Model *model); - - static void initialiseSampleDir(); - - virtual sv_frame_t mixDenseTimeValueModel - (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount, - float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut); - - virtual sv_frame_t mixClipModel - (Model *model, sv_frame_t startFrame, sv_frame_t frameCount, - float **buffer, float gain, float pan); - - virtual sv_frame_t mixContinuousSynthModel - (Model *model, sv_frame_t startFrame, sv_frame_t frameCount, - float **buffer, float gain, float pan); - - static const sv_frame_t m_processingBlockSize; - - float **m_channelBuffer; - sv_frame_t m_channelBufSiz; - int m_channelBufCount; -}; - -#endif -
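[Note] The NoteOff struct and Comparator declared in the removed header above exist so pending note-offs can sit in a std::multiset ordered by frame, letting mixClipModel() pop every off event that falls at or before the end of the current block. A standalone illustration of that bookkeeping with simplified types:

    #include <set>
    #include <cstdio>

    struct NoteOff {
        float frequency;
        long frame;
        struct Comparator {
            bool operator()(const NoteOff &a, const NoteOff &b) const {
                return a.frame < b.frame;          // earliest frame first
            }
        };
    };

    int main() {
        std::multiset<NoteOff, NoteOff::Comparator> offs;
        offs.insert({ 440.f, 3000 });
        offs.insert({ 220.f, 1000 });
        offs.insert({ 330.f, 1000 });              // equal frames are fine in a multiset

        long blockEnd = 2048;
        while (!offs.empty() && offs.begin()->frame <= blockEnd) {
            std::printf("note-off %.0f Hz at frame %ld\n",
                        offs.begin()->frequency, offs.begin()->frame);
            offs.erase(offs.begin());              // 220 and 330 go now; 440 waits
        }
        return 0;
    }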
--- a/audioio/AudioJACKTarget.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,487 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifdef HAVE_JACK - -#include "AudioJACKTarget.h" -#include "AudioCallbackPlaySource.h" - -#include <iostream> -#include <cmath> - -#include <alloca.h> - -//#define DEBUG_AUDIO_JACK_TARGET 1 - -#ifdef BUILD_STATIC -#ifdef Q_OS_LINUX - -// Some lunacy to enable JACK support in static builds. JACK isn't -// supposed to be linked statically, because it depends on a -// consistent shared memory layout between client library and daemon, -// so it's very fragile in the face of version mismatches. -// -// Therefore for static builds on Linux we avoid linking against JACK -// at all during the build, instead using dlopen and runtime symbol -// lookup to switch on JACK support at runtime. The following big -// mess (down to the #endifs) is the code that implements this. - -static void *symbol(const char *name) -{ - static bool attempted = false; - static void *library = 0; - static std::map<const char *, void *> symbols; - if (symbols.find(name) != symbols.end()) return symbols[name]; - if (!library) { - if (!attempted) { - library = ::dlopen("libjack.so.1", RTLD_NOW); - if (!library) library = ::dlopen("libjack.so.0", RTLD_NOW); - if (!library) library = ::dlopen("libjack.so", RTLD_NOW); - if (!library) { - cerr << "WARNING: AudioJACKTarget: Failed to load JACK library: " - << ::dlerror() << " (tried .so, .so.0, .so.1)" - << endl; - } - attempted = true; - } - if (!library) return 0; - } - void *symbol = ::dlsym(library, name); - if (!symbol) { - cerr << "WARNING: AudioJACKTarget: Failed to locate symbol " - << name << ": " << ::dlerror() << endl; - } - symbols[name] = symbol; - return symbol; -} - -static jack_client_t *dynamic_jack_client_open(const char *client_name, - jack_options_t options, - jack_status_t *status, ...) 
-{ - typedef jack_client_t *(*func)(const char *client_name, - jack_options_t options, - jack_status_t *status, ...); - void *s = symbol("jack_client_open"); - if (!s) return 0; - func f = (func)s; - return f(client_name, options, status); // varargs not supported here -} - -static int dynamic_jack_set_process_callback(jack_client_t *client, - JackProcessCallback process_callback, - void *arg) -{ - typedef int (*func)(jack_client_t *client, - JackProcessCallback process_callback, - void *arg); - void *s = symbol("jack_set_process_callback"); - if (!s) return 1; - func f = (func)s; - return f(client, process_callback, arg); -} - -static int dynamic_jack_set_xrun_callback(jack_client_t *client, - JackXRunCallback xrun_callback, - void *arg) -{ - typedef int (*func)(jack_client_t *client, - JackXRunCallback xrun_callback, - void *arg); - void *s = symbol("jack_set_xrun_callback"); - if (!s) return 1; - func f = (func)s; - return f(client, xrun_callback, arg); -} - -static const char **dynamic_jack_get_ports(jack_client_t *client, - const char *port_name_pattern, - const char *type_name_pattern, - unsigned long flags) -{ - typedef const char **(*func)(jack_client_t *client, - const char *port_name_pattern, - const char *type_name_pattern, - unsigned long flags); - void *s = symbol("jack_get_ports"); - if (!s) return 0; - func f = (func)s; - return f(client, port_name_pattern, type_name_pattern, flags); -} - -static jack_port_t *dynamic_jack_port_register(jack_client_t *client, - const char *port_name, - const char *port_type, - unsigned long flags, - unsigned long buffer_size) -{ - typedef jack_port_t *(*func)(jack_client_t *client, - const char *port_name, - const char *port_type, - unsigned long flags, - unsigned long buffer_size); - void *s = symbol("jack_port_register"); - if (!s) return 0; - func f = (func)s; - return f(client, port_name, port_type, flags, buffer_size); -} - -static int dynamic_jack_connect(jack_client_t *client, - const char *source, - const char *dest) -{ - typedef int (*func)(jack_client_t *client, - const char *source, - const char *dest); - void *s = symbol("jack_connect"); - if (!s) return 1; - func f = (func)s; - return f(client, source, dest); -} - -static void *dynamic_jack_port_get_buffer(jack_port_t *port, - jack_nframes_t sz) -{ - typedef void *(*func)(jack_port_t *, jack_nframes_t); - void *s = symbol("jack_port_get_buffer"); - if (!s) return 0; - func f = (func)s; - return f(port, sz); -} - -static int dynamic_jack_port_unregister(jack_client_t *client, - jack_port_t *port) -{ - typedef int(*func)(jack_client_t *, jack_port_t *); - void *s = symbol("jack_port_unregister"); - if (!s) return 0; - func f = (func)s; - return f(client, port); -} - -static void dynamic_jack_port_get_latency_range(jack_port_t *port, - jack_latency_callback_mode_t mode, - jack_latency_range_t *range) -{ - typedef void (*func)(jack_port_t *, jack_latency_callback_mode_t, jack_latency_range_t *); - void *s = symbol("jack_port_get_latency_range"); - if (!s) { - range->min = range->max = 0; - return; - } - func f = (func)s; - f(port, mode, range); -} - -#define dynamic1(rv, name, argtype, failval) \ - static rv dynamic_##name(argtype arg) { \ - typedef rv (*func) (argtype); \ - void *s = symbol(#name); \ - if (!s) return failval; \ - func f = (func) s; \ - return f(arg); \ - } - -dynamic1(jack_client_t *, jack_client_new, const char *, 0); -dynamic1(jack_nframes_t, jack_get_buffer_size, jack_client_t *, 0); -dynamic1(jack_nframes_t, jack_get_sample_rate, jack_client_t *, 0); 
-dynamic1(int, jack_activate, jack_client_t *, 1); -dynamic1(int, jack_deactivate, jack_client_t *, 1); -dynamic1(int, jack_client_close, jack_client_t *, 1); -dynamic1(jack_nframes_t, jack_frame_time, jack_client_t *, 0); -dynamic1(const char *, jack_port_name, const jack_port_t *, 0); - -#define jack_client_new dynamic_jack_client_new -#define jack_client_open dynamic_jack_client_open -#define jack_get_buffer_size dynamic_jack_get_buffer_size -#define jack_get_sample_rate dynamic_jack_get_sample_rate -#define jack_set_process_callback dynamic_jack_set_process_callback -#define jack_set_xrun_callback dynamic_jack_set_xrun_callback -#define jack_activate dynamic_jack_activate -#define jack_deactivate dynamic_jack_deactivate -#define jack_client_close dynamic_jack_client_close -#define jack_frame_time dynamic_jack_frame_time -#define jack_get_ports dynamic_jack_get_ports -#define jack_port_register dynamic_jack_port_register -#define jack_port_unregister dynamic_jack_port_unregister -#define jack_port_name dynamic_jack_port_name -#define jack_connect dynamic_jack_connect -#define jack_port_get_buffer dynamic_jack_port_get_buffer - -#endif -#endif - -AudioJACKTarget::AudioJACKTarget(AudioCallbackPlaySource *source) : - AudioCallbackPlayTarget(source), - m_client(0), - m_bufferSize(0), - m_sampleRate(0), - m_done(false) -{ - JackOptions options = JackNullOption; -#ifdef HAVE_PORTAUDIO_2_0 - options = JackNoStartServer; -#endif -#ifdef HAVE_LIBPULSE - options = JackNoStartServer; -#endif - - JackStatus status = JackStatus(0); - m_client = jack_client_open(source->getClientName().toLocal8Bit().data(), - options, &status); - - if (!m_client) { - cerr << "AudioJACKTarget: Failed to connect to JACK server: status code " - << status << endl; - return; - } - - m_bufferSize = jack_get_buffer_size(m_client); - m_sampleRate = jack_get_sample_rate(m_client); - - jack_set_xrun_callback(m_client, xrunStatic, this); - jack_set_process_callback(m_client, processStatic, this); - - if (jack_activate(m_client)) { - cerr << "ERROR: AudioJACKTarget: Failed to activate JACK client" - << endl; - } - - if (m_source) { - sourceModelReplaced(); - } - - // Mainstream JACK (though not jackdmp) calls mlockall() to lock - // down all memory for real-time operation. That isn't a terribly - // good idea in an application like this that may have very high - // dynamic memory usage in other threads, as mlockall() applies - // across all threads. We're far better off undoing it here and - // accepting the possible loss of true RT capability. - MUNLOCKALL(); -} - -AudioJACKTarget::~AudioJACKTarget() -{ - SVDEBUG << "AudioJACKTarget::~AudioJACKTarget()" << endl; - - if (m_source) { - m_source->setTarget(0, m_bufferSize); - } - - shutdown(); - - if (m_client) { - - while (m_outputs.size() > 0) { - std::vector<jack_port_t *>::iterator itr = m_outputs.end(); - --itr; - jack_port_t *port = *itr; - cerr << "unregister " << m_outputs.size() << endl; - if (port) jack_port_unregister(m_client, port); - m_outputs.erase(itr); - } - cerr << "Deactivating... "; - jack_deactivate(m_client); - cerr << "done\nClosing... 
"; - jack_client_close(m_client); - cerr << "done" << endl; - } - - m_client = 0; - - SVDEBUG << "AudioJACKTarget::~AudioJACKTarget() done" << endl; -} - -void -AudioJACKTarget::shutdown() -{ - m_done = true; -} - -bool -AudioJACKTarget::isOK() const -{ - return (m_client != 0); -} - -double -AudioJACKTarget::getCurrentTime() const -{ - if (m_client && m_sampleRate) { - return double(jack_frame_time(m_client)) / double(m_sampleRate); - } else { - return 0.0; - } -} - -int -AudioJACKTarget::processStatic(jack_nframes_t nframes, void *arg) -{ - return ((AudioJACKTarget *)arg)->process(nframes); -} - -int -AudioJACKTarget::xrunStatic(void *arg) -{ - return ((AudioJACKTarget *)arg)->xrun(); -} - -void -AudioJACKTarget::sourceModelReplaced() -{ - m_mutex.lock(); - - m_source->setTarget(this, m_bufferSize); - m_source->setTargetSampleRate(m_sampleRate); - - int channels = m_source->getSourceChannelCount(); - - // Because we offer pan, we always want at least 2 channels - if (channels < 2) channels = 2; - - if (channels == (int)m_outputs.size() || !m_client) { - m_mutex.unlock(); - return; - } - - const char **ports = - jack_get_ports(m_client, NULL, NULL, - JackPortIsPhysical | JackPortIsInput); - int physicalPortCount = 0; - while (ports[physicalPortCount]) ++physicalPortCount; - -#ifdef DEBUG_AUDIO_JACK_TARGET - SVDEBUG << "AudioJACKTarget::sourceModelReplaced: have " << channels << " channels and " << physicalPortCount << " physical ports" << endl; -#endif - - while ((int)m_outputs.size() < channels) { - - const int namelen = 30; - char name[namelen]; - jack_port_t *port; - - snprintf(name, namelen, "out %d", int(m_outputs.size() + 1)); - - port = jack_port_register(m_client, - name, - JACK_DEFAULT_AUDIO_TYPE, - JackPortIsOutput, - 0); - - if (!port) { - cerr - << "ERROR: AudioJACKTarget: Failed to create JACK output port " - << m_outputs.size() << endl; - } else { - jack_latency_range_t range; - jack_port_get_latency_range(port, JackPlaybackLatency, &range); - m_source->setTargetPlayLatency(range.max); - cerr << "AudioJACKTarget: output latency is " << range.max << endl; - } - - if ((int)m_outputs.size() < physicalPortCount) { - jack_connect(m_client, jack_port_name(port), ports[m_outputs.size()]); - } - - m_outputs.push_back(port); - } - - while ((int)m_outputs.size() > channels) { - std::vector<jack_port_t *>::iterator itr = m_outputs.end(); - --itr; - jack_port_t *port = *itr; - if (port) jack_port_unregister(m_client, port); - m_outputs.erase(itr); - } - - m_mutex.unlock(); -} - -int -AudioJACKTarget::process(jack_nframes_t nframes) -{ - if (m_done) return 0; - - if (!m_mutex.tryLock()) { - return 0; - } - - if (m_outputs.empty()) { - m_mutex.unlock(); - return 0; - } - -#ifdef DEBUG_AUDIO_JACK_TARGET - cout << "AudioJACKTarget::process(" << nframes << "): have a source" << endl; -#endif - -#ifdef DEBUG_AUDIO_JACK_TARGET - if (m_bufferSize != nframes) { - cerr << "WARNING: m_bufferSize != nframes (" << m_bufferSize << " != " << nframes << ")" << endl; - } -#endif - - float **buffers = (float **)alloca(m_outputs.size() * sizeof(float *)); - - for (int ch = 0; ch < (int)m_outputs.size(); ++ch) { - buffers[ch] = (float *)jack_port_get_buffer(m_outputs[ch], nframes); - } - - sv_frame_t received = 0; - - if (m_source) { - received = m_source->getSourceSamples(nframes, buffers); - } - - for (int ch = 0; ch < (int)m_outputs.size(); ++ch) { - for (sv_frame_t i = received; i < nframes; ++i) { - buffers[ch][i] = 0.0; - } - } - - float peakLeft = 0.0, peakRight = 0.0; - - for (int ch = 0; ch < 
(int)m_outputs.size(); ++ch) { - - float peak = 0.0; - - for (int i = 0; i < (int)nframes; ++i) { - buffers[ch][i] *= m_outputGain; - float sample = fabsf(buffers[ch][i]); - if (sample > peak) peak = sample; - } - - if (ch == 0) peakLeft = peak; - if (ch > 0 || m_outputs.size() == 1) peakRight = peak; - } - - if (m_source) { - m_source->setOutputLevels(peakLeft, peakRight); - } - - m_mutex.unlock(); - return 0; -} - -int -AudioJACKTarget::xrun() -{ - cerr << "AudioJACKTarget: xrun!" << endl; - if (m_source) m_source->audioProcessingOverload(); - return 0; -} - -#endif /* HAVE_JACK */ -
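[Note] The block above avoids linking libjack into static Linux builds by resolving its symbols at runtime with dlopen/dlsym. A minimal version of that pattern for a single function is sketched below; it tries the same .so names as the removed code and only demonstrates the lookup (a real jack_client_t* would be needed to call the function). Build with -ldl.

    #include <dlfcn.h>
    #include <cstdio>

    typedef unsigned int (*get_sample_rate_fn)(void *client);  // jack_get_sample_rate shape

    static void *loadJackLibrary() {
        static void *lib = nullptr;
        if (!lib) lib = dlopen("libjack.so.1", RTLD_NOW);
        if (!lib) lib = dlopen("libjack.so.0", RTLD_NOW);
        if (!lib) lib = dlopen("libjack.so", RTLD_NOW);
        return lib;
    }

    int main() {
        void *lib = loadJackLibrary();
        if (!lib) {
            std::fprintf(stderr, "no libjack available: %s\n", dlerror());
            return 1;
        }
        get_sample_rate_fn f =
            (get_sample_rate_fn)dlsym(lib, "jack_get_sample_rate");
        if (!f) {
            std::fprintf(stderr, "symbol lookup failed: %s\n", dlerror());
            return 1;
        }
        std::printf("resolved jack_get_sample_rate at %p\n", (void *)f);
        return 0;
    }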
--- a/audioio/AudioJACKTarget.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,65 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_JACK_TARGET_H_ -#define _AUDIO_JACK_TARGET_H_ - -#ifdef HAVE_JACK - -#include <jack/jack.h> -#include <vector> - -#include "AudioCallbackPlayTarget.h" - -#include <QMutex> - -class AudioCallbackPlaySource; - -class AudioJACKTarget : public AudioCallbackPlayTarget -{ - Q_OBJECT - -public: - AudioJACKTarget(AudioCallbackPlaySource *source); - virtual ~AudioJACKTarget(); - - virtual void shutdown(); - - virtual bool isOK() const; - - virtual double getCurrentTime() const; - -public slots: - virtual void sourceModelReplaced(); - -protected: - int process(jack_nframes_t nframes); - int xrun(); - - static int processStatic(jack_nframes_t, void *); - static int xrunStatic(void *); - - jack_client_t *m_client; - std::vector<jack_port_t *> m_outputs; - jack_nframes_t m_bufferSize; - jack_nframes_t m_sampleRate; - QMutex m_mutex; - bool m_done; -}; - -#endif /* HAVE_JACK */ - -#endif -
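[Note] The processStatic/xrunStatic declarations above (and their PortAudio counterparts below) use the usual trampoline for handing a C callback plus a void* user pointer back into a C++ member function. The pattern in isolation, with hypothetical names and no audio library involved:

    #include <cstdio>

    typedef int (*process_callback_t)(unsigned long nframes, void *arg);

    struct FakeEngine {                            // stands in for the audio API
        process_callback_t cb = nullptr;
        void *arg = nullptr;
        void run(unsigned long nframes) { if (cb) cb(nframes, arg); }
    };

    class Target {
    public:
        void attach(FakeEngine &e) { e.cb = &Target::processStatic; e.arg = this; }
    private:
        static int processStatic(unsigned long nframes, void *arg) {
            return static_cast<Target *>(arg)->process(nframes);   // back into C++
        }
        int process(unsigned long nframes) {
            std::printf("process(%lu) on %p\n", nframes, (void *)this);
            return 0;
        }
    };

    int main() {
        FakeEngine engine;
        Target target;
        target.attach(engine);
        engine.run(1024);
        return 0;
    }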
--- a/audioio/AudioPortAudioTarget.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,300 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifdef HAVE_PORTAUDIO_2_0 - -#include "AudioPortAudioTarget.h" -#include "AudioCallbackPlaySource.h" - -#include <iostream> -#include <cassert> -#include <cmath> - -#ifndef _WIN32 -#include <pthread.h> -#endif - -//#define DEBUG_AUDIO_PORT_AUDIO_TARGET 1 - -AudioPortAudioTarget::AudioPortAudioTarget(AudioCallbackPlaySource *source) : - AudioCallbackPlayTarget(source), - m_stream(0), - m_bufferSize(0), - m_sampleRate(0), - m_latency(0), - m_prioritySet(false), - m_done(false) -{ - PaError err; - -#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET - cerr << "AudioPortAudioTarget: Initialising for PortAudio v19" << endl; -#endif - - err = Pa_Initialize(); - if (err != paNoError) { - cerr << "ERROR: AudioPortAudioTarget: Failed to initialize PortAudio: " << Pa_GetErrorText(err) << endl; - return; - } - - m_bufferSize = 2048; - m_sampleRate = 44100; - if (m_source && (m_source->getSourceSampleRate() != 0)) { - m_sampleRate = int(m_source->getSourceSampleRate()); - } - - PaStreamParameters op; - op.device = Pa_GetDefaultOutputDevice(); - op.channelCount = 2; - op.sampleFormat = paFloat32; - op.suggestedLatency = 0.2; - op.hostApiSpecificStreamInfo = 0; - err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate, - paFramesPerBufferUnspecified, - paNoFlag, processStatic, this); - - if (err != paNoError) { - - cerr << "WARNING: AudioPortAudioTarget: Failed to open PortAudio stream with default frames per buffer, trying again with fixed frames per buffer..." 
<< endl; - - err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate, - 1024, - paNoFlag, processStatic, this); - m_bufferSize = 1024; - } - - if (err != paNoError) { - cerr << "ERROR: AudioPortAudioTarget: Failed to open PortAudio stream: " << Pa_GetErrorText(err) << endl; - cerr << "Note: device ID was " << op.device << endl; - m_stream = 0; - Pa_Terminate(); - return; - } - - const PaStreamInfo *info = Pa_GetStreamInfo(m_stream); - m_latency = int(info->outputLatency * m_sampleRate + 0.001); - if (m_bufferSize < m_latency) m_bufferSize = m_latency; - - cerr << "PortAudio latency = " << m_latency << " frames" << endl; - - err = Pa_StartStream(m_stream); - - if (err != paNoError) { - cerr << "ERROR: AudioPortAudioTarget: Failed to start PortAudio stream: " << Pa_GetErrorText(err) << endl; - Pa_CloseStream(m_stream); - m_stream = 0; - Pa_Terminate(); - return; - } - - if (m_source) { - cerr << "AudioPortAudioTarget: block size " << m_bufferSize << endl; - m_source->setTarget(this, m_bufferSize); - m_source->setTargetSampleRate(m_sampleRate); - m_source->setTargetPlayLatency(m_latency); - } - -#ifdef DEBUG_PORT_AUDIO_TARGET - cerr << "AudioPortAudioTarget: initialised OK" << endl; -#endif -} - -AudioPortAudioTarget::~AudioPortAudioTarget() -{ - SVDEBUG << "AudioPortAudioTarget::~AudioPortAudioTarget()" << endl; - - if (m_source) { - m_source->setTarget(0, m_bufferSize); - } - - shutdown(); - - if (m_stream) { - - SVDEBUG << "closing stream" << endl; - - PaError err; - err = Pa_CloseStream(m_stream); - if (err != paNoError) { - cerr << "ERROR: AudioPortAudioTarget: Failed to close PortAudio stream: " << Pa_GetErrorText(err) << endl; - } - - cerr << "terminating" << endl; - - err = Pa_Terminate(); - if (err != paNoError) { - cerr << "ERROR: AudioPortAudioTarget: Failed to terminate PortAudio: " << Pa_GetErrorText(err) << endl; - } - } - - m_stream = 0; - - SVDEBUG << "AudioPortAudioTarget::~AudioPortAudioTarget() done" << endl; -} - -void -AudioPortAudioTarget::shutdown() -{ -#ifdef DEBUG_PORT_AUDIO_TARGET - SVDEBUG << "AudioPortAudioTarget::shutdown" << endl; -#endif - m_done = true; -} - -bool -AudioPortAudioTarget::isOK() const -{ - return (m_stream != 0); -} - -double -AudioPortAudioTarget::getCurrentTime() const -{ - if (!m_stream) return 0.0; - else return Pa_GetStreamTime(m_stream); -} - -int -AudioPortAudioTarget::processStatic(const void *input, void *output, - unsigned long nframes, - const PaStreamCallbackTimeInfo *timeInfo, - PaStreamCallbackFlags flags, void *data) -{ - return ((AudioPortAudioTarget *)data)->process(input, output, - nframes, timeInfo, - flags); -} - -void -AudioPortAudioTarget::sourceModelReplaced() -{ - m_source->setTargetSampleRate(m_sampleRate); -} - -int -AudioPortAudioTarget::process(const void *, void *outputBuffer, - sv_frame_t nframes, - const PaStreamCallbackTimeInfo *, - PaStreamCallbackFlags) -{ -#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET - SVDEBUG << "AudioPortAudioTarget::process(" << nframes << ")" << endl; -#endif - - if (!m_source || m_done) { -#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET - SVDEBUG << "AudioPortAudioTarget::process: Doing nothing, no source or application done" << endl; -#endif - return 0; - } - - if (!m_prioritySet) { -#ifndef _WIN32 - sched_param param; - param.sched_priority = 20; - if (pthread_setschedparam(pthread_self(), SCHED_RR, ¶m)) { - SVDEBUG << "AudioPortAudioTarget: NOTE: couldn't set RT scheduling class" << endl; - } else { - SVDEBUG << "AudioPortAudioTarget: NOTE: successfully set RT scheduling class" << endl; - } -#endif - 
m_prioritySet = true; - } - - float *output = (float *)outputBuffer; - - assert(nframes <= m_bufferSize); - - static float **tmpbuf = 0; - static int tmpbufch = 0; - static int tmpbufsz = 0; - - int sourceChannels = m_source->getSourceChannelCount(); - - // Because we offer pan, we always want at least 2 channels - if (sourceChannels < 2) sourceChannels = 2; - - if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < m_bufferSize) { - - if (tmpbuf) { - for (int i = 0; i < tmpbufch; ++i) { - delete[] tmpbuf[i]; - } - delete[] tmpbuf; - } - - tmpbufch = sourceChannels; - tmpbufsz = m_bufferSize; - tmpbuf = new float *[tmpbufch]; - - for (int i = 0; i < tmpbufch; ++i) { - tmpbuf[i] = new float[tmpbufsz]; - } - } - - sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf); - - float peakLeft = 0.0, peakRight = 0.0; - - for (int ch = 0; ch < 2; ++ch) { - - float peak = 0.0; - - if (ch < sourceChannels) { - - // PortAudio samples are interleaved - for (int i = 0; i < nframes; ++i) { - if (i < received) { - output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain; - float sample = fabsf(output[i * 2 + ch]); - if (sample > peak) peak = sample; - } else { - output[i * 2 + ch] = 0; - } - } - - } else if (ch == 1 && sourceChannels == 1) { - - for (int i = 0; i < nframes; ++i) { - if (i < received) { - output[i * 2 + ch] = tmpbuf[0][i] * m_outputGain; - float sample = fabsf(output[i * 2 + ch]); - if (sample > peak) peak = sample; - } else { - output[i * 2 + ch] = 0; - } - } - - } else { - for (int i = 0; i < nframes; ++i) { - output[i * 2 + ch] = 0; - } - } - - if (ch == 0) peakLeft = peak; - if (ch > 0 || sourceChannels == 1) peakRight = peak; - } - - m_source->setOutputLevels(peakLeft, peakRight); - - if (Pa_GetStreamCpuLoad(m_stream) > 0.7) { - if (m_source) m_source->audioProcessingOverload(); - } - - return 0; -} - -#endif /* HAVE_PORTAUDIO */ -
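[Note] The PortAudio callback above writes interleaved stereo: frame i lands at output[i*2] (left) and output[i*2+1] (right), with the output gain applied and the per-channel peak remembered for the level meters. A stripped-down version of just that copy loop:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    static void interleaveStereo(const float *left, const float *right,
                                 float *out, int nframes, float gain,
                                 float &peakL, float &peakR)
    {
        peakL = peakR = 0.f;
        for (int i = 0; i < nframes; ++i) {
            out[i * 2]     = left[i]  * gain;
            out[i * 2 + 1] = right[i] * gain;
            peakL = std::max(peakL, std::fabs(out[i * 2]));
            peakR = std::max(peakR, std::fabs(out[i * 2 + 1]));
        }
    }

    int main() {
        std::vector<float> l = { 0.1f, -0.6f, 0.2f }, r = { 0.3f, 0.4f, -0.5f };
        std::vector<float> out(l.size() * 2);
        float peakL = 0, peakR = 0;
        interleaveStereo(l.data(), r.data(), out.data(), 3, 1.0f, peakL, peakR);
        std::printf("peaks: L %.1f R %.1f\n", peakL, peakR);   // 0.6 / 0.5
        return 0;
    }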
--- a/audioio/AudioPortAudioTarget.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_PORT_AUDIO_TARGET_H_ -#define _AUDIO_PORT_AUDIO_TARGET_H_ - -#ifdef HAVE_PORTAUDIO_2_0 - -// This code requires PortAudio v19 -- it won't work with v18. - -#include <portaudio.h> - -#include <QObject> - -#include "AudioCallbackPlayTarget.h" - -#include "base/BaseTypes.h" - -class AudioCallbackPlaySource; - -class AudioPortAudioTarget : public AudioCallbackPlayTarget -{ - Q_OBJECT - -public: - AudioPortAudioTarget(AudioCallbackPlaySource *source); - virtual ~AudioPortAudioTarget(); - - virtual void shutdown(); - - virtual bool isOK() const; - - virtual double getCurrentTime() const; - -public slots: - virtual void sourceModelReplaced(); - -protected: - int process(const void *input, void *output, sv_frame_t frames, - const PaStreamCallbackTimeInfo *timeInfo, - PaStreamCallbackFlags statusFlags); - - static int processStatic(const void *, void *, unsigned long, - const PaStreamCallbackTimeInfo *, - PaStreamCallbackFlags, void *); - - PaStream *m_stream; - - int m_bufferSize; - int m_sampleRate; - int m_latency; - bool m_prioritySet; - bool m_done; -}; - -#endif /* HAVE_PORTAUDIO */ - -#endif -
--- a/audioio/AudioPulseAudioTarget.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,416 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2008 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifdef HAVE_LIBPULSE - -#include "AudioPulseAudioTarget.h" -#include "AudioCallbackPlaySource.h" - -#include <QMutexLocker> - -#include <iostream> -#include <cassert> -#include <cmath> - -#define DEBUG_AUDIO_PULSE_AUDIO_TARGET 1 -//#define DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY 1 - -AudioPulseAudioTarget::AudioPulseAudioTarget(AudioCallbackPlaySource *source) : - AudioCallbackPlayTarget(source), - m_mutex(QMutex::Recursive), - m_loop(0), - m_api(0), - m_context(0), - m_stream(0), - m_loopThread(0), - m_bufferSize(0), - m_sampleRate(0), - m_latency(0), - m_done(false) -{ -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - cerr << "AudioPulseAudioTarget: Initialising for PulseAudio" << endl; -#endif - - m_loop = pa_mainloop_new(); - if (!m_loop) { - cerr << "ERROR: AudioPulseAudioTarget: Failed to create main loop" << endl; - return; - } - - m_api = pa_mainloop_get_api(m_loop); - - //!!! handle signals how? - - m_bufferSize = 20480; - m_sampleRate = 44100; - if (m_source && (m_source->getSourceSampleRate() != 0)) { - m_sampleRate = int(m_source->getSourceSampleRate()); - } - m_spec.rate = m_sampleRate; - m_spec.channels = 2; - m_spec.format = PA_SAMPLE_FLOAT32NE; - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - cerr << "AudioPulseAudioTarget: Creating context" << endl; -#endif - - m_context = pa_context_new(m_api, source->getClientName().toLocal8Bit().data()); - if (!m_context) { - cerr << "ERROR: AudioPulseAudioTarget: Failed to create context object" << endl; - return; - } - - pa_context_set_state_callback(m_context, contextStateChangedStatic, this); - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - cerr << "AudioPulseAudioTarget: Connecting to default server..." 
<< endl; -#endif - - pa_context_connect(m_context, 0, // default server - (pa_context_flags_t)PA_CONTEXT_NOAUTOSPAWN, 0); - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - cerr << "AudioPulseAudioTarget: Starting main loop" << endl; -#endif - - m_loopThread = new MainLoopThread(m_loop); - m_loopThread->start(); - -#ifdef DEBUG_PULSE_AUDIO_TARGET - cerr << "AudioPulseAudioTarget: initialised OK" << endl; -#endif -} - -AudioPulseAudioTarget::~AudioPulseAudioTarget() -{ - SVDEBUG << "AudioPulseAudioTarget::~AudioPulseAudioTarget()" << endl; - - if (m_source) { - m_source->setTarget(0, m_bufferSize); - } - - shutdown(); - - QMutexLocker locker(&m_mutex); - - if (m_stream) pa_stream_unref(m_stream); - - if (m_context) pa_context_unref(m_context); - - if (m_loop) { - pa_signal_done(); - pa_mainloop_free(m_loop); - } - - m_stream = 0; - m_context = 0; - m_loop = 0; - - SVDEBUG << "AudioPulseAudioTarget::~AudioPulseAudioTarget() done" << endl; -} - -void -AudioPulseAudioTarget::shutdown() -{ - m_done = true; -} - -bool -AudioPulseAudioTarget::isOK() const -{ - return (m_context != 0); -} - -double -AudioPulseAudioTarget::getCurrentTime() const -{ - if (!m_stream) return 0.0; - - pa_usec_t usec = 0; - pa_stream_get_time(m_stream, &usec); - return double(usec) / 1000000.0; -} - -void -AudioPulseAudioTarget::sourceModelReplaced() -{ - m_source->setTargetSampleRate(m_sampleRate); -} - -void -AudioPulseAudioTarget::streamWriteStatic(pa_stream *, - size_t length, - void *data) -{ - AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data; - -// assert(stream == target->m_stream); - - target->streamWrite(length); -} - -void -AudioPulseAudioTarget::streamWrite(sv_frame_t requested) -{ -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY - cout << "AudioPulseAudioTarget::streamWrite(" << requested << ")" << endl; -#endif - if (m_done) return; - - QMutexLocker locker(&m_mutex); - - pa_usec_t latency = 0; - int negative = 0; - if (!pa_stream_get_latency(m_stream, &latency, &negative)) { - int latframes = int(double(latency) / 1000000.0 * double(m_sampleRate)); - if (latframes > 0) m_source->setTargetPlayLatency(latframes); - } - - static float *output = 0; - static float **tmpbuf = 0; - static int tmpbufch = 0; - static sv_frame_t tmpbufsz = 0; - - int sourceChannels = m_source->getSourceChannelCount(); - - // Because we offer pan, we always want at least 2 channels - if (sourceChannels < 2) sourceChannels = 2; - - sv_frame_t nframes = requested / (sourceChannels * sizeof(float)); - - if (nframes > m_bufferSize) { - cerr << "WARNING: AudioPulseAudioTarget::streamWrite: nframes " << nframes << " > m_bufferSize " << m_bufferSize << endl; - } - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY - cout << "AudioPulseAudioTarget::streamWrite: nframes = " << nframes << endl; -#endif - - if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < nframes) { - - if (tmpbuf) { - for (int i = 0; i < tmpbufch; ++i) { - delete[] tmpbuf[i]; - } - delete[] tmpbuf; - } - - if (output) { - delete[] output; - } - - tmpbufch = sourceChannels; - tmpbufsz = nframes; - tmpbuf = new float *[tmpbufch]; - - for (int i = 0; i < tmpbufch; ++i) { - tmpbuf[i] = new float[tmpbufsz]; - } - - output = new float[tmpbufsz * tmpbufch]; - } - - sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf); - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY - cerr << "requested " << nframes << ", received " << received << endl; - - if (received < nframes) { - cerr << "*** WARNING: Wrong number of frames received" << endl; - } -#endif - - float peakLeft = 0.0, 
peakRight = 0.0; - - for (int ch = 0; ch < 2; ++ch) { - - float peak = 0.0; - - // PulseAudio samples are interleaved - for (int i = 0; i < nframes; ++i) { - if (i < received) { - output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain; - float sample = fabsf(output[i * 2 + ch]); - if (sample > peak) peak = sample; - } else { - output[i * 2 + ch] = 0; - } - } - - if (ch == 0) peakLeft = peak; - if (ch == 1) peakRight = peak; - } - -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY - SVDEBUG << "calling pa_stream_write with " - << nframes * tmpbufch * sizeof(float) << " bytes" << endl; -#endif - - pa_stream_write(m_stream, output, - size_t(nframes * tmpbufch * sizeof(float)), - 0, 0, PA_SEEK_RELATIVE); - - m_source->setOutputLevels(peakLeft, peakRight); - - return; -} - -void -AudioPulseAudioTarget::streamStateChangedStatic(pa_stream *, - void *data) -{ - AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data; - -// assert(stream == target->m_stream); - - target->streamStateChanged(); -} - -void -AudioPulseAudioTarget::streamStateChanged() -{ -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged" << endl; -#endif - QMutexLocker locker(&m_mutex); - - switch (pa_stream_get_state(m_stream)) { - - case PA_STREAM_UNCONNECTED: - case PA_STREAM_CREATING: - case PA_STREAM_TERMINATED: - break; - - case PA_STREAM_READY: - { - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Ready" << endl; - - pa_usec_t latency = 0; - int negative = 0; - if (pa_stream_get_latency(m_stream, &latency, &negative)) { - cerr << "AudioPulseAudioTarget::streamStateChanged: Failed to query latency" << endl; - } - cerr << "Latency = " << latency << " usec" << endl; - int latframes = int(double(latency) / 1000000.0 * m_sampleRate); - cerr << "that's " << latframes << " frames" << endl; - - const pa_buffer_attr *attr; - if (!(attr = pa_stream_get_buffer_attr(m_stream))) { - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Cannot query stream buffer attributes" << endl; - m_source->setTarget(this, m_bufferSize); - m_source->setTargetSampleRate(m_sampleRate); - if (latframes != 0) m_source->setTargetPlayLatency(latframes); - } else { - int targetLength = attr->tlength; - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: stream target length = " << targetLength << endl; - m_source->setTarget(this, targetLength); - m_source->setTargetSampleRate(m_sampleRate); - if (latframes == 0) latframes = targetLength; - cerr << "latency = " << latframes << endl; - m_source->setTargetPlayLatency(latframes); - } - } - break; - - case PA_STREAM_FAILED: - default: - cerr << "AudioPulseAudioTarget::streamStateChanged: Error: " - << pa_strerror(pa_context_errno(m_context)) << endl; - //!!! do something... 
- break; - } -} - -void -AudioPulseAudioTarget::contextStateChangedStatic(pa_context *, - void *data) -{ - AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data; - -// assert(context == target->m_context); - - target->contextStateChanged(); -} - -void -AudioPulseAudioTarget::contextStateChanged() -{ -#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET - SVDEBUG << "AudioPulseAudioTarget::contextStateChanged" << endl; -#endif - QMutexLocker locker(&m_mutex); - - switch (pa_context_get_state(m_context)) { - - case PA_CONTEXT_UNCONNECTED: - case PA_CONTEXT_CONNECTING: - case PA_CONTEXT_AUTHORIZING: - case PA_CONTEXT_SETTING_NAME: - break; - - case PA_CONTEXT_READY: - SVDEBUG << "AudioPulseAudioTarget::contextStateChanged: Ready" - << endl; - - m_stream = pa_stream_new(m_context, "stream", &m_spec, 0); - assert(m_stream); //!!! - - pa_stream_set_state_callback(m_stream, streamStateChangedStatic, this); - pa_stream_set_write_callback(m_stream, streamWriteStatic, this); - pa_stream_set_overflow_callback(m_stream, streamOverflowStatic, this); - pa_stream_set_underflow_callback(m_stream, streamUnderflowStatic, this); - if (pa_stream_connect_playback - (m_stream, 0, 0, - pa_stream_flags_t(PA_STREAM_INTERPOLATE_TIMING | - PA_STREAM_AUTO_TIMING_UPDATE), - 0, 0)) { //??? return value - cerr << "AudioPulseAudioTarget: Failed to connect playback stream" << endl; - } - - break; - - case PA_CONTEXT_TERMINATED: - SVDEBUG << "AudioPulseAudioTarget::contextStateChanged: Terminated" << endl; - //!!! do something... - break; - - case PA_CONTEXT_FAILED: - default: - cerr << "AudioPulseAudioTarget::contextStateChanged: Error: " - << pa_strerror(pa_context_errno(m_context)) << endl; - //!!! do something... - break; - } -} - -void -AudioPulseAudioTarget::streamOverflowStatic(pa_stream *, void *) -{ - SVDEBUG << "AudioPulseAudioTarget::streamOverflowStatic: Overflow!" << endl; -} - -void -AudioPulseAudioTarget::streamUnderflowStatic(pa_stream *, void *data) -{ - SVDEBUG << "AudioPulseAudioTarget::streamUnderflowStatic: Underflow!" << endl; - AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data; - if (target && target->m_source) { - target->m_source->audioProcessingOverload(); - } -} - -#endif /* HAVE_PULSEAUDIO */ -
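
The write callback in the removed AudioPulseAudioTarget above does three things per block: ask the play source for deinterleaved per-channel buffers, interleave them into the two-channel float format declared in m_spec, and report per-channel peaks back for the level meters. A minimal sketch of just that interleave-and-meter step; the function name and signature are illustrative, not part of the class:

    // Interleave two deinterleaved channel buffers into a stereo frame
    // stream, applying the output gain and tracking per-channel peaks,
    // as AudioPulseAudioTarget::streamWrite does before pa_stream_write.
    #include <cmath>
    #include <cstddef>

    static void interleaveAndMeter(const float *const *channels, // [2][nframes]
                                   float *interleaved,           // [nframes * 2]
                                   std::size_t nframes,
                                   float gain,
                                   float &peakLeft,
                                   float &peakRight)
    {
        peakLeft = 0.0f;
        peakRight = 0.0f;
        for (std::size_t i = 0; i < nframes; ++i) {
            for (int ch = 0; ch < 2; ++ch) {
                float sample = channels[ch][i] * gain;
                interleaved[i * 2 + ch] = sample;
                float mag = std::fabs(sample);
                if (ch == 0 && mag > peakLeft) peakLeft = mag;
                else if (ch == 1 && mag > peakRight) peakRight = mag;
            }
        }
    }
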
--- a/audioio/AudioPulseAudioTarget.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,91 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2008 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_PULSE_AUDIO_TARGET_H_ -#define _AUDIO_PULSE_AUDIO_TARGET_H_ - -#ifdef HAVE_LIBPULSE - -#include <pulse/pulseaudio.h> - -#include <QObject> -#include <QMutex> -#include "base/Thread.h" - -#include "AudioCallbackPlayTarget.h" - -class AudioCallbackPlaySource; - -class AudioPulseAudioTarget : public AudioCallbackPlayTarget -{ - Q_OBJECT - -public: - AudioPulseAudioTarget(AudioCallbackPlaySource *source); - virtual ~AudioPulseAudioTarget(); - - virtual void shutdown(); - - virtual bool isOK() const; - - virtual double getCurrentTime() const; - -public slots: - virtual void sourceModelReplaced(); - -protected: - void streamWrite(sv_frame_t); - void streamStateChanged(); - void contextStateChanged(); - - static void streamWriteStatic(pa_stream *, size_t, void *); - static void streamStateChangedStatic(pa_stream *, void *); - static void streamOverflowStatic(pa_stream *, void *); - static void streamUnderflowStatic(pa_stream *, void *); - static void contextStateChangedStatic(pa_context *, void *); - - QMutex m_mutex; - - class MainLoopThread : public Thread - { - public: - MainLoopThread(pa_mainloop *loop) : Thread(NonRTThread), m_loop(loop) { } //!!! or RTThread - virtual void run() { - int rv = 0; - pa_mainloop_run(m_loop, &rv); //!!! check return value from this, and rv - } - - private: - pa_mainloop *m_loop; - }; - - pa_mainloop *m_loop; - pa_mainloop_api *m_api; - pa_context *m_context; - pa_stream *m_stream; - pa_sample_spec m_spec; - - MainLoopThread *m_loopThread; - - int m_bufferSize; - int m_sampleRate; - int m_latency; - bool m_done; -}; - -#endif /* HAVE_PULSEAUDIO */ - -#endif -
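
The MainLoopThread helper in the removed header exists because pa_mainloop_run() blocks until the loop quits, so it has to run on a worker thread for the stream and context callbacks to fire while the rest of the application continues. A sketch of the same idea using std::thread rather than the project's Thread base class (assumption: asking the loop to quit from the destructor is an acceptable shutdown path; the original never stops the loop explicitly and simply frees it in the target's destructor):

    #include <pulse/pulseaudio.h>
    #include <thread>

    class PulseLoopRunner
    {
    public:
        explicit PulseLoopRunner(pa_mainloop *loop) :
            m_loop(loop),
            m_thread([this]() {
                int rv = 0;
                pa_mainloop_run(m_loop, &rv); // blocks until the loop quits
            })
        { }

        ~PulseLoopRunner() {
            pa_mainloop_quit(m_loop, 0); // request that run() return
            m_thread.join();             // then wait for the worker to finish
        }

    private:
        pa_mainloop *m_loop;   // owned by the caller, as in the original
        std::thread m_thread;
    };
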
--- a/audioio/AudioTargetFactory.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,164 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "AudioTargetFactory.h" - -#include "AudioJACKTarget.h" -#include "AudioPortAudioTarget.h" -#include "AudioPulseAudioTarget.h" - -#include "AudioCallbackPlayTarget.h" - -#include <QCoreApplication> - -#include <iostream> - -AudioTargetFactory * -AudioTargetFactory::m_instance = 0; - -AudioTargetFactory * -AudioTargetFactory::getInstance() -{ - if (!m_instance) m_instance = new AudioTargetFactory(); - return m_instance; -} - -AudioTargetFactory::AudioTargetFactory() -{ -} - -std::vector<QString> -AudioTargetFactory::getCallbackTargetNames(bool includeAuto) const -{ - std::vector<QString> names; - if (includeAuto) names.push_back("auto"); - -#ifdef HAVE_JACK - names.push_back("jack"); -#endif - -#ifdef HAVE_LIBPULSE - names.push_back("pulse"); -#endif - -#ifdef HAVE_PORTAUDIO_2_0 - names.push_back("port"); -#endif - - return names; -} - -QString -AudioTargetFactory::getCallbackTargetDescription(QString name) const -{ - if (name == "auto") { - return QCoreApplication::translate("AudioTargetFactory", - "(auto)"); - } - if (name == "jack") { - return QCoreApplication::translate("AudioTargetFactory", - "JACK Audio Connection Kit"); - } - if (name == "pulse") { - return QCoreApplication::translate("AudioTargetFactory", - "PulseAudio Server"); - } - if (name == "port") { - return QCoreApplication::translate("AudioTargetFactory", - "Default Soundcard Device"); - } - - return "(unknown)"; -} - -QString -AudioTargetFactory::getDefaultCallbackTarget() const -{ - if (m_default == "") return "auto"; - return m_default; -} - -bool -AudioTargetFactory::isAutoCallbackTarget(QString name) const -{ - return (name == "auto" || name == ""); -} - -void -AudioTargetFactory::setDefaultCallbackTarget(QString target) -{ - m_default = target; -} - -AudioCallbackPlayTarget * -AudioTargetFactory::createCallbackTarget(AudioCallbackPlaySource *source) -{ - AudioCallbackPlayTarget *target = 0; - - if (m_default != "" && m_default != "auto") { - -#ifdef HAVE_JACK - if (m_default == "jack") target = new AudioJACKTarget(source); -#endif - -#ifdef HAVE_LIBPULSE - if (m_default == "pulse") target = new AudioPulseAudioTarget(source); -#endif - -#ifdef HAVE_PORTAUDIO_2_0 - if (m_default == "port") target = new AudioPortAudioTarget(source); -#endif - - if (!target || !target->isOK()) { - cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open the requested target (\"" << m_default << "\")" << endl; - delete target; - return 0; - } else { - return target; - } - } - -#ifdef HAVE_JACK - target = new AudioJACKTarget(source); - if (target->isOK()) return target; - else { - cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open JACK target" << endl; - delete target; - } -#endif - -#ifdef HAVE_LIBPULSE - target = new AudioPulseAudioTarget(source); - if (target->isOK()) return target; - 
else { - cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open PulseAudio target" << endl; - delete target; - } -#endif - -#ifdef HAVE_PORTAUDIO_2_0 - target = new AudioPortAudioTarget(source); - if (target->isOK()) return target; - else { - cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open PortAudio target" << endl; - delete target; - } -#endif - - cerr << "WARNING: AudioTargetFactory::createCallbackTarget: No suitable targets available" << endl; - return 0; -} - -
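
Stripped of its #ifdef guards, the removed createCallbackTarget above is a priority-ordered fallback: honour an explicitly requested backend if one was set, otherwise try JACK, then PulseAudio, then PortAudio, deleting any candidate whose isOK() check fails. The same shape as a sketch, with illustrative stand-in types rather than the real class hierarchy:

    #include <functional>
    #include <iostream>
    #include <vector>

    struct Target {
        virtual ~Target() { }
        virtual bool isOK() const = 0;
    };

    // Try each candidate constructor in priority order; keep the first
    // target that reports itself usable, discard the rest.
    static Target *createFirstWorkingTarget(
        const std::vector<std::function<Target *()>> &constructors)
    {
        for (const auto &make : constructors) {
            Target *t = make();
            if (t && t->isOK()) return t;
            std::cerr << "WARNING: candidate target failed to open, trying next" << std::endl;
            delete t;
        }
        return nullptr; // no suitable target, as in the factory's final branch
    }
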
--- a/audioio/AudioTargetFactory.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,47 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _AUDIO_TARGET_FACTORY_H_ -#define _AUDIO_TARGET_FACTORY_H_ - -#include <vector> -#include <QString> - -#include "base/Debug.h" - -class AudioCallbackPlaySource; -class AudioCallbackPlayTarget; - -class AudioTargetFactory -{ -public: - static AudioTargetFactory *getInstance(); - - std::vector<QString> getCallbackTargetNames(bool includeAuto = true) const; - QString getCallbackTargetDescription(QString name) const; - QString getDefaultCallbackTarget() const; - bool isAutoCallbackTarget(QString name) const; - void setDefaultCallbackTarget(QString name); - - AudioCallbackPlayTarget *createCallbackTarget(AudioCallbackPlaySource *); - -protected: - AudioTargetFactory(); - static AudioTargetFactory *m_instance; - QString m_default; -}; - -#endif -
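
A hedged usage sketch for the factory API declared above: callers never constructed a target class directly, they asked the singleton for one, optionally pinning the backend first. The wrapper function and error handling here are illustrative, not taken from the application code:

    #include "AudioTargetFactory.h"
    #include <iostream>

    void openPlaybackFor(AudioCallbackPlaySource *source)
    {
        AudioTargetFactory *factory = AudioTargetFactory::getInstance();
        factory->setDefaultCallbackTarget("pulse"); // or "auto", "jack", "port"
        AudioCallbackPlayTarget *target = factory->createCallbackTarget(source);
        if (!target) {
            std::cerr << "No suitable audio playback target available" << std::endl;
        }
    }
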
--- a/audioio/ClipMixer.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,248 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam, 2006-2014 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "ClipMixer.h" - -#include <sndfile.h> -#include <cmath> - -#include "base/Debug.h" - -//#define DEBUG_CLIP_MIXER 1 - -ClipMixer::ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize) : - m_channels(channels), - m_sampleRate(sampleRate), - m_blockSize(blockSize), - m_clipData(0), - m_clipLength(0), - m_clipF0(0), - m_clipRate(0) -{ -} - -ClipMixer::~ClipMixer() -{ - if (m_clipData) free(m_clipData); -} - -void -ClipMixer::setChannelCount(int channels) -{ - m_channels = channels; -} - -bool -ClipMixer::loadClipData(QString path, double f0, double level) -{ - if (m_clipData) { - cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl; - return false; - } - - SF_INFO info; - SNDFILE *file; - float *tmpFrames; - sv_frame_t i; - - info.format = 0; - file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info); - if (!file) { - cerr << "ClipMixer::loadClipData: Failed to open file path \"" - << path << "\": " << sf_strerror(file) << endl; - return false; - } - - tmpFrames = (float *)malloc(info.frames * info.channels * sizeof(float)); - if (!tmpFrames) { - cerr << "ClipMixer::loadClipData: malloc(" << info.frames * info.channels * sizeof(float) << ") failed" << endl; - return false; - } - - sf_readf_float(file, tmpFrames, info.frames); - sf_close(file); - - m_clipData = (float *)malloc(info.frames * sizeof(float)); - if (!m_clipData) { - cerr << "ClipMixer::loadClipData: malloc(" << info.frames * sizeof(float) << ") failed" << endl; - free(tmpFrames); - return false; - } - - for (i = 0; i < info.frames; ++i) { - int j; - m_clipData[i] = 0.0f; - for (j = 0; j < info.channels; ++j) { - m_clipData[i] += tmpFrames[i * info.channels + j] * float(level); - } - } - - free(tmpFrames); - - m_clipLength = info.frames; - m_clipF0 = f0; - m_clipRate = info.samplerate; - - return true; -} - -void -ClipMixer::reset() -{ - m_playing.clear(); -} - -double -ClipMixer::getResampleRatioFor(double frequency) -{ - if (!m_clipData || !m_clipRate) return 1.0; - double pitchRatio = m_clipF0 / frequency; - double resampleRatio = m_sampleRate / m_clipRate; - return pitchRatio * resampleRatio; -} - -sv_frame_t -ClipMixer::getResampledClipDuration(double frequency) -{ - return sv_frame_t(ceil(double(m_clipLength) * getResampleRatioFor(frequency))); -} - -void -ClipMixer::mix(float **toBuffers, - float gain, - std::vector<NoteStart> newNotes, - std::vector<NoteEnd> endingNotes) -{ - foreach (NoteStart note, newNotes) { - if (note.frequency > 20 && - note.frequency < 5000) { - m_playing.push_back(note); - } - } - - std::vector<NoteStart> remaining; - - float *levels = new float[m_channels]; - -#ifdef DEBUG_CLIP_MIXER - cerr << "ClipMixer::mix: have " << m_playing.size() << " playing note(s)" - << " and " << endingNotes.size() << " note(s) ending here" - << endl; -#endif - - 
foreach (NoteStart note, m_playing) { - - for (int c = 0; c < m_channels; ++c) { - levels[c] = note.level * gain; - } - if (note.pan != 0.0 && m_channels == 2) { - levels[0] *= 1.0f - note.pan; - levels[1] *= note.pan + 1.0f; - } - - sv_frame_t start = note.frameOffset; - sv_frame_t durationHere = m_blockSize; - if (start > 0) durationHere = m_blockSize - start; - - bool ending = false; - - foreach (NoteEnd end, endingNotes) { - if (end.frequency == note.frequency && - end.frameOffset >= start && - end.frameOffset <= m_blockSize) { - ending = true; - durationHere = end.frameOffset; - if (start > 0) durationHere = end.frameOffset - start; - break; - } - } - - sv_frame_t clipDuration = getResampledClipDuration(note.frequency); - if (start + clipDuration > 0) { - if (start < 0 && start + clipDuration < durationHere) { - durationHere = start + clipDuration; - } - if (durationHere > 0) { - mixNote(toBuffers, - levels, - note.frequency, - start < 0 ? -start : 0, - start > 0 ? start : 0, - durationHere, - ending); - } - } - - if (!ending) { - NoteStart adjusted = note; - adjusted.frameOffset -= m_blockSize; - remaining.push_back(adjusted); - } - } - - delete[] levels; - - m_playing = remaining; -} - -void -ClipMixer::mixNote(float **toBuffers, - float *levels, - float frequency, - sv_frame_t sourceOffset, - sv_frame_t targetOffset, - sv_frame_t sampleCount, - bool isEnd) -{ - if (!m_clipData) return; - - double ratio = getResampleRatioFor(frequency); - - double releaseTime = 0.01; - sv_frame_t releaseSampleCount = sv_frame_t(round(releaseTime * m_sampleRate)); - if (releaseSampleCount > sampleCount) { - releaseSampleCount = sampleCount; - } - double releaseFraction = 1.0/double(releaseSampleCount); - - for (sv_frame_t i = 0; i < sampleCount; ++i) { - - sv_frame_t s = sourceOffset + i; - - double os = double(s) / ratio; - sv_frame_t osi = sv_frame_t(floor(os)); - - //!!! just linear interpolation for now (same as SV's sample - //!!! player). a small sinc kernel would be better and - //!!! probably "good enough" - double value = 0.0; - if (osi < m_clipLength) { - value += m_clipData[osi]; - } - if (osi + 1 < m_clipLength) { - value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - double(osi)); - } - - if (isEnd && i + releaseSampleCount > sampleCount) { - value *= releaseFraction * double(sampleCount - i); // linear ramp for release - } - - for (int c = 0; c < m_channels; ++c) { - toBuffers[c][targetOffset + i] += float(levels[c] * value); - } - } -} - -
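
The heart of the removed mixNote above is a linear-interpolation resampler: output sample i reads the clip at position i / ratio, where ratio = (clipF0 / noteFrequency) * (outputRate / clipRate) as computed by getResampleRatioFor, and the value is interpolated between the two neighbouring clip samples (the in-code comment already notes that a small sinc kernel would do better). That read as a standalone sketch, with illustrative names:

    #include <cmath>
    #include <vector>

    static double readClipLinear(const std::vector<float> &clip,
                                 long outputIndex,
                                 double ratio) // (clipF0 / f) * (outRate / clipRate)
    {
        double pos = double(outputIndex) / ratio;  // fractional clip position
        long i = long(std::floor(pos));
        double frac = pos - double(i);

        double value = 0.0;
        if (i >= 0 && i < long(clip.size())) {
            value += clip[i];
        }
        if (i >= 0 && i + 1 < long(clip.size())) {
            value += (clip[i + 1] - clip[i]) * frac; // linear interpolation
        }
        return value;
    }
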
--- a/audioio/ClipMixer.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,94 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 Chris Cannam, 2006-2014 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef CLIP_MIXER_H -#define CLIP_MIXER_H - -#include <QString> -#include <vector> - -#include "base/BaseTypes.h" - -/** - * Mix in synthetic notes produced by resampling a prerecorded - * clip. (i.e. this is an implementation of a digital sampler in the - * musician's sense.) This can mix any number of notes of arbitrary - * frequency, so long as they all use the same sample clip. - */ - -class ClipMixer -{ -public: - ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize); - ~ClipMixer(); - - void setChannelCount(int channels); - - /** - * Load a sample clip from a wav file. This can only happen once: - * construct a new ClipMixer if you want a different clip. The - * clip was recorded at a pitch with fundamental frequency clipF0, - * and should be scaled by level (in the range 0-1) when playing - * back. - */ - bool loadClipData(QString clipFilePath, double clipF0, double level); - - void reset(); // discarding any playing notes - - struct NoteStart { - sv_frame_t frameOffset; // within current processing block - float frequency; // Hz - float level; // volume in range (0,1] - float pan; // range [-1,1] - }; - - struct NoteEnd { - sv_frame_t frameOffset; // in current processing block - float frequency; // matching note start - }; - - void mix(float **toBuffers, - float gain, - std::vector<NoteStart> newNotes, - std::vector<NoteEnd> endingNotes); - -private: - int m_channels; - sv_samplerate_t m_sampleRate; - sv_frame_t m_blockSize; - - QString m_clipPath; - - float *m_clipData; - sv_frame_t m_clipLength; - double m_clipF0; - sv_samplerate_t m_clipRate; - - std::vector<NoteStart> m_playing; - - double getResampleRatioFor(double frequency); - sv_frame_t getResampledClipDuration(double frequency); - - void mixNote(float **toBuffers, - float *levels, - float frequency, - sv_frame_t sourceOffset, // within resampled note - sv_frame_t targetOffset, // within target buffer - sv_frame_t sampleCount, - bool isEnd); -}; - - -#endif
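
A hedged usage sketch for the ClipMixer API declared above: one mixer per clip, with notes delivered per processing block as NoteStart / NoteEnd records whose frame offsets are relative to that block. The clip path and the buffer plumbing here are hypothetical, for illustration only:

    #include "ClipMixer.h"
    #include <vector>

    void renderOneBlock(float **channelBuffers) // [2][1024], zeroed by the caller
    {
        static ClipMixer mixer(2, 44100, 1024);
        static bool loaded = mixer.loadClipData("tap.wav", 440.0, 1.0); // hypothetical clip
        (void)loaded;

        ClipMixer::NoteStart start;
        start.frameOffset = 0;    // note begins at the top of this block
        start.frequency = 220.0f; // an octave below the clip's own pitch
        start.level = 0.8f;
        start.pan = 0.0f;

        std::vector<ClipMixer::NoteStart> starting { start };
        std::vector<ClipMixer::NoteEnd> ending; // nothing ends in this block

        mixer.mix(channelBuffers, 1.0f, starting, ending);
    }
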
--- a/audioio/ContinuousSynth.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,149 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "ContinuousSynth.h" - -#include "base/Debug.h" -#include "system/System.h" - -#include <cmath> - -ContinuousSynth::ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType) : - m_channels(channels), - m_sampleRate(sampleRate), - m_blockSize(blockSize), - m_prevF0(-1.0), - m_phase(0.0), - m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square -{ -} - -ContinuousSynth::~ContinuousSynth() -{ -} - -void -ContinuousSynth::reset() -{ - m_phase = 0; -} - -void -ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0f) -{ - double f0(f0f); - if (f0 == 0.0) f0 = m_prevF0; - - bool wasOn = (m_prevF0 > 0.0); - bool nowOn = (f0 > 0.0); - - if (!nowOn && !wasOn) { - m_phase = 0; - return; - } - - sv_frame_t fadeLength = 100; - - float *levels = new float[m_channels]; - - for (int c = 0; c < m_channels; ++c) { - levels[c] = gain * 0.5f; // scale gain otherwise too loud compared to source - } - if (pan != 0.0 && m_channels == 2) { - levels[0] *= 1.0f - pan; - levels[1] *= pan + 1.0f; - } - -// cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl; - - for (sv_frame_t i = 0; i < m_blockSize; ++i) { - - double fHere = (nowOn ? f0 : m_prevF0); - - if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) { - // interpolate the frequency shift - fHere = m_prevF0 + ((f0 - m_prevF0) * double(i)) / double(fadeLength); - } - - double phasor = (fHere * 2 * M_PI) / m_sampleRate; - - m_phase = m_phase + phasor; - - int harmonics = int((m_sampleRate / 4) / fHere - 1); - if (harmonics < 1) harmonics = 1; - - switch (m_wavetype) { - case 1: - harmonics = 1; - break; - case 2: - break; - case 3: - break; - default: - harmonics = 3; - break; - } - - for (int h = 0; h < harmonics; ++h) { - - double v = 0; - double hn = 0; - double hp = 0; - - switch (m_wavetype) { - case 1: // single sinusoid - v = sin(m_phase); - break; - case 2: // sawtooth - if (h != 0) { - hn = h + 1; - hp = m_phase * hn; - v = -(1.0 / M_PI) * sin(hp) / hn; - } else { - v = 0.5; - } - break; - case 3: // square - hn = h*2 + 1; - hp = m_phase * hn; - v = sin(hp) / hn; - break; - default: // 3 sinusoids - hn = h + 1; - hp = m_phase * hn; - v = sin(hp) / hn; - break; - } - - if (!wasOn && i < fadeLength) { - // fade in - v = v * (double(i) / double(fadeLength)); - } else if (!nowOn) { - // fade out - if (i > fadeLength) v = 0; - else v = v * (1.0 - (double(i) / double(fadeLength))); - } - - for (int c = 0; c < m_channels; ++c) { - toBuffers[c][i] += float(levels[c] * v); - } - } - } - - m_prevF0 = f0; - - delete[] levels; -} -
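
The waveform branches in the removed mix() above are plain additive synthesis: a square wave is approximated by summing odd harmonics sin((2h+1)*phase)/(2h+1), a sawtooth by the series 1/2 - (1/pi) * sum of sin(n*phase)/n, and the number of partials is limited from f0 and the sample rate so the sum stays roughly band-limited. A minimal sketch of the square-wave case for a single sample, with illustrative names:

    #include <cmath>

    // One sample of an additively synthesised square wave: odd harmonics
    // only, each scaled by 1/harmonic, partial count capped from the
    // sample rate as in ContinuousSynth::mix.
    static double bandLimitedSquareSample(double phase, double f0, double sampleRate)
    {
        int harmonics = int((sampleRate / 4) / f0 - 1);
        if (harmonics < 1) harmonics = 1;

        double v = 0.0;
        for (int h = 0; h < harmonics; ++h) {
            double hn = h * 2 + 1;          // odd harmonic numbers 1, 3, 5, ...
            v += std::sin(phase * hn) / hn; // sin((2h+1)*phase) / (2h+1)
        }
        return v;
    }
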
--- a/audioio/ContinuousSynth.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,65 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef CONTINUOUS_SYNTH_H -#define CONTINUOUS_SYNTH_H - -#include "base/BaseTypes.h" - -/** - * Mix into a target buffer a signal synthesised so as to sound at a - * specific frequency. The frequency may change with each processing - * block, or may be switched on or off. - */ - -class ContinuousSynth -{ -public: - ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType); - ~ContinuousSynth(); - - void setChannelCount(int channels); - - void reset(); - - /** - * Mix in a signal to be heard at the given fundamental - * frequency. Any oscillator state will be maintained between - * process calls so as to provide a continuous sound. The f0 value - * may vary between calls. - * - * Supply f0 equal to 0 if you want to maintain the f0 from the - * previous block (without having to remember what it was). - * - * Supply f0 less than 0 for silence. You should continue to call - * this even when the signal is silent if you want to ensure the - * sound switches on and off cleanly. - */ - void mix(float **toBuffers, - float gain, - float pan, - float f0); - -private: - int m_channels; - sv_samplerate_t m_sampleRate; - sv_frame_t m_blockSize; - - double m_prevF0; - double m_phase; - - int m_wavetype; -}; - -#endif
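
A hedged usage sketch for the ContinuousSynth API above, showing the f0 convention its comment describes: a positive value (re)pitches the tone, zero holds the previous pitch, and a negative value requests silence while still letting the fade-out run. The buffer handling is illustrative; in a real callback each block would be a fresh, zeroed set of buffers:

    #include "ContinuousSynth.h"

    void renderThreeBlocks(float **buffers) // [2][1024]
    {
        ContinuousSynth synth(2, 44100, 1024, 0); // wave type 0: summed sinusoids

        synth.mix(buffers, 1.0f, 0.0f, 440.0f); // block 1: sound at 440 Hz
        synth.mix(buffers, 1.0f, 0.0f, 0.0f);   // block 2: hold the same pitch
        synth.mix(buffers, 1.0f, 0.0f, -1.0f);  // block 3: fade cleanly to silence
    }
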
--- a/audioio/PlaySpeedRangeMapper.cpp Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,101 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#include "PlaySpeedRangeMapper.h" - -#include <iostream> -#include <cmath> - -// PlaySpeedRangeMapper maps a position in the range [0,120] on to a -// play speed factor on a logarithmic scale in the range 0.125 -> -// 8. This ensures that the desirable speed factors 0.25, 0.5, 1, 2, -// and 4 are all mapped to exact positions (respectively 20, 40, 60, -// 80, 100). - -// Note that the "factor" referred to below is a play speed factor -// (higher = faster, 1.0 = normal speed), the "value" is a percentage -// (higher = faster, 100 = normal speed), and the "position" is an -// integer step on the dial's scale (0-120, 60 = centre). - -PlaySpeedRangeMapper::PlaySpeedRangeMapper() : - m_minpos(0), - m_maxpos(120) -{ -} - -int -PlaySpeedRangeMapper::getPositionForValue(double value) const -{ - // value is percent - double factor = getFactorForValue(value); - int position = getPositionForFactor(factor); - return position; -} - -int -PlaySpeedRangeMapper::getPositionForValueUnclamped(double value) const -{ - // We don't really provide this - return getPositionForValue(value); -} - -double -PlaySpeedRangeMapper::getValueForPosition(int position) const -{ - double factor = getFactorForPosition(position); - double pc = getValueForFactor(factor); - return pc; -} - -double -PlaySpeedRangeMapper::getValueForPositionUnclamped(int position) const -{ - // We don't really provide this - return getValueForPosition(position); -} - -double -PlaySpeedRangeMapper::getValueForFactor(double factor) const -{ - return factor * 100.0; -} - -double -PlaySpeedRangeMapper::getFactorForValue(double value) const -{ - return value / 100.0; -} - -int -PlaySpeedRangeMapper::getPositionForFactor(double factor) const -{ - if (factor == 0) return m_minpos; - int pos = int(lrint((log2(factor) + 3.0) * 20.0)); - if (pos < m_minpos) pos = m_minpos; - if (pos > m_maxpos) pos = m_maxpos; - return pos; -} - -double -PlaySpeedRangeMapper::getFactorForPosition(int position) const -{ - return pow(2.0, double(position) * 0.05 - 3.0); -} - -QString -PlaySpeedRangeMapper::getUnit() const -{ - return "%"; -}
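
The mapping described in the comment above can be checked directly: position = lrint((log2(factor) + 3) * 20), so factors 0.125 through 8 land on positions 0 through 120 and the convenient factors 0.25, 0.5, 1, 2, 4 fall on multiples of 20. A standalone sketch, independent of the RangeMapper class:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double factors[] = { 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0 };
        for (double factor : factors) {
            int position = int(std::lrint((std::log2(factor) + 3.0) * 20.0));
            double back = std::pow(2.0, double(position) * 0.05 - 3.0);
            std::printf("factor %-6g -> position %3d -> factor %g\n",
                        factor, position, back);
            // prints positions 0, 20, 40, 60, 80, 100, 120 respectively
        }
        return 0;
    }
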
--- a/audioio/PlaySpeedRangeMapper.h Sat Jan 30 12:05:14 2016 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,49 +0,0 @@ -/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ - -/* - Sonic Visualiser - An audio file viewer and annotation editor. - Centre for Digital Music, Queen Mary, University of London. - This file copyright 2006 QMUL. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. See the file - COPYING included with this distribution for more information. -*/ - -#ifndef _PLAY_SPEED_RANGE_MAPPER_H_ -#define _PLAY_SPEED_RANGE_MAPPER_H_ - -#include "base/RangeMapper.h" - -class PlaySpeedRangeMapper : public RangeMapper -{ -public: - PlaySpeedRangeMapper(); - - int getMinPosition() const { return m_minpos; } - int getMaxPosition() const { return m_maxpos; } - - virtual int getPositionForValue(double value) const; - virtual int getPositionForValueUnclamped(double value) const; - - virtual double getValueForPosition(int position) const; - virtual double getValueForPositionUnclamped(int position) const; - - int getPositionForFactor(double factor) const; - double getValueForFactor(double factor) const; - - double getFactorForPosition(int position) const; - double getFactorForValue(double value) const; - - virtual QString getUnit() const; - -protected: - int m_minpos; - int m_maxpos; -}; - - -#endif
--- a/configure Sat Jan 30 12:05:14 2016 +0000 +++ b/configure Fri Jan 13 10:29:55 2017 +0000 @@ -646,16 +646,12 @@ libpulse_CFLAGS JACK_LIBS JACK_CFLAGS -portaudio_2_0_LIBS -portaudio_2_0_CFLAGS +portaudio_LIBS +portaudio_CFLAGS liblo_LIBS liblo_CFLAGS rubberband_LIBS rubberband_CFLAGS -vamphostsdk_LIBS -vamphostsdk_CFLAGS -vamp_LIBS -vamp_CFLAGS samplerate_LIBS samplerate_CFLAGS sndfile_LIBS @@ -756,16 +752,12 @@ sndfile_LIBS samplerate_CFLAGS samplerate_LIBS -vamp_CFLAGS -vamp_LIBS -vamphostsdk_CFLAGS -vamphostsdk_LIBS rubberband_CFLAGS rubberband_LIBS liblo_CFLAGS liblo_LIBS -portaudio_2_0_CFLAGS -portaudio_2_0_LIBS +portaudio_CFLAGS +portaudio_LIBS JACK_CFLAGS JACK_LIBS libpulse_CFLAGS @@ -1423,12 +1415,6 @@ C compiler flags for samplerate, overriding pkg-config samplerate_LIBS linker flags for samplerate, overriding pkg-config - vamp_CFLAGS C compiler flags for vamp, overriding pkg-config - vamp_LIBS linker flags for vamp, overriding pkg-config - vamphostsdk_CFLAGS - C compiler flags for vamphostsdk, overriding pkg-config - vamphostsdk_LIBS - linker flags for vamphostsdk, overriding pkg-config rubberband_CFLAGS C compiler flags for rubberband, overriding pkg-config rubberband_LIBS @@ -1436,10 +1422,10 @@ liblo_CFLAGS C compiler flags for liblo, overriding pkg-config liblo_LIBS linker flags for liblo, overriding pkg-config - portaudio_2_0_CFLAGS - C compiler flags for portaudio_2_0, overriding pkg-config - portaudio_2_0_LIBS - linker flags for portaudio_2_0, overriding pkg-config + portaudio_CFLAGS + C compiler flags for portaudio, overriding pkg-config + portaudio_LIBS + linker flags for portaudio, overriding pkg-config JACK_CFLAGS C compiler flags for JACK, overriding pkg-config JACK_LIBS linker flags for JACK, overriding pkg-config libpulse_CFLAGS @@ -5226,18 +5212,18 @@ fi -SV_MODULE_MODULE=vamp -SV_MODULE_VERSION_TEST="vamp >= 2.1" -SV_MODULE_HEADER=vamp/vamp.h -SV_MODULE_LIB= -SV_MODULE_FUNC= -SV_MODULE_HAVE=HAVE_$(echo vamp | tr 'a-z' 'A-Z') +SV_MODULE_MODULE=rubberband +SV_MODULE_VERSION_TEST="rubberband" +SV_MODULE_HEADER=rubberband/RubberBandStretcher.h +SV_MODULE_LIB=rubberband +SV_MODULE_FUNC=rubberband_new +SV_MODULE_HAVE=HAVE_$(echo rubberband | tr 'a-z' 'A-Z') SV_MODULE_FAILED=1 -if test -n "$vamp_LIBS" ; then +if test -n "$rubberband_LIBS" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&5 $as_echo "$as_me: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&6;} - CXXFLAGS="$CXXFLAGS $vamp_CFLAGS" - LIBS="$LIBS $vamp_LIBS" + CXXFLAGS="$CXXFLAGS $rubberband_CFLAGS" + LIBS="$LIBS $rubberband_LIBS" SV_MODULE_FAILED="" fi if test -z "$SV_MODULE_VERSION_TEST" ; then @@ -5246,11 +5232,11 @@ if test -n "$SV_MODULE_FAILED" && test -n "$PKG_CONFIG"; then pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for vamp" >&5 -$as_echo_n "checking for vamp... " >&6; } - -if test -n "$vamp_CFLAGS"; then - pkg_cv_vamp_CFLAGS="$vamp_CFLAGS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rubberband" >&5 +$as_echo_n "checking for rubberband... " >&6; } + +if test -n "$rubberband_CFLAGS"; then + pkg_cv_rubberband_CFLAGS="$rubberband_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5258,7 +5244,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_vamp_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_rubberband_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -5266,8 +5252,8 @@ else pkg_failed=untried fi -if test -n "$vamp_LIBS"; then - pkg_cv_vamp_LIBS="$vamp_LIBS" +if test -n "$rubberband_LIBS"; then + pkg_cv_rubberband_LIBS="$rubberband_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5275,7 +5261,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_vamp_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_rubberband_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -5296,12 +5282,12 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - vamp_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + rubberband_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` else - vamp_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + rubberband_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` fi # Put the nasty error message in config.log where it belongs - echo "$vamp_PKG_ERRORS" >&5 + echo "$rubberband_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 $as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} @@ -5311,11 +5297,11 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 $as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} else - vamp_CFLAGS=$pkg_cv_vamp_CFLAGS - vamp_LIBS=$pkg_cv_vamp_LIBS + rubberband_CFLAGS=$pkg_cv_rubberband_CFLAGS + rubberband_LIBS=$pkg_cv_rubberband_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } - HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $vamp_CFLAGS";LIBS="$LIBS $vamp_LIBS";SV_MODULE_FAILED="" + HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $rubberband_CFLAGS";LIBS="$LIBS $rubberband_LIBS";SV_MODULE_FAILED="" fi fi if test -n "$SV_MODULE_FAILED"; then @@ -5377,18 +5363,19 @@ fi -SV_MODULE_MODULE=vamphostsdk -SV_MODULE_VERSION_TEST="vamp-hostsdk >= 2.5" -SV_MODULE_HEADER=vamp-hostsdk/PluginLoader.h -SV_MODULE_LIB=vamp-hostsdk -SV_MODULE_FUNC=libvamphostsdk_v_2_5_present -SV_MODULE_HAVE=HAVE_$(echo vamphostsdk | tr 'a-z' 'A-Z') + +SV_MODULE_MODULE=liblo +SV_MODULE_VERSION_TEST="" +SV_MODULE_HEADER=lo/lo.h +SV_MODULE_LIB=lo +SV_MODULE_FUNC=lo_address_new +SV_MODULE_HAVE=HAVE_$(echo liblo | tr 'a-z' 'A-Z') SV_MODULE_FAILED=1 -if test -n "$vamphostsdk_LIBS" ; then +if test -n "$liblo_LIBS" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&5 $as_echo "$as_me: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&6;} - CXXFLAGS="$CXXFLAGS 
$vamphostsdk_CFLAGS" - LIBS="$LIBS $vamphostsdk_LIBS" + CXXFLAGS="$CXXFLAGS $liblo_CFLAGS" + LIBS="$LIBS $liblo_LIBS" SV_MODULE_FAILED="" fi if test -z "$SV_MODULE_VERSION_TEST" ; then @@ -5397,11 +5384,11 @@ if test -n "$SV_MODULE_FAILED" && test -n "$PKG_CONFIG"; then pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for vamphostsdk" >&5 -$as_echo_n "checking for vamphostsdk... " >&6; } - -if test -n "$vamphostsdk_CFLAGS"; then - pkg_cv_vamphostsdk_CFLAGS="$vamphostsdk_CFLAGS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for liblo" >&5 +$as_echo_n "checking for liblo... " >&6; } + +if test -n "$liblo_CFLAGS"; then + pkg_cv_liblo_CFLAGS="$liblo_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5409,7 +5396,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_vamphostsdk_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_liblo_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -5417,8 +5404,8 @@ else pkg_failed=untried fi -if test -n "$vamphostsdk_LIBS"; then - pkg_cv_vamphostsdk_LIBS="$vamphostsdk_LIBS" +if test -n "$liblo_LIBS"; then + pkg_cv_liblo_LIBS="$liblo_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5426,7 +5413,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_vamphostsdk_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_liblo_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes @@ -5447,40 +5434,42 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - vamphostsdk_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + liblo_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` else - vamphostsdk_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + liblo_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` fi # Put the nasty error message in config.log where it belongs - echo "$vamphostsdk_PKG_ERRORS" >&5 - - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} + echo "$liblo_PKG_ERRORS" >&5 + + { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 +$as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} -else - vamphostsdk_CFLAGS=$pkg_cv_vamphostsdk_CFLAGS - vamphostsdk_LIBS=$pkg_cv_vamphostsdk_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 +$as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} +else + liblo_CFLAGS=$pkg_cv_liblo_CFLAGS + liblo_LIBS=$pkg_cv_liblo_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } - HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $vamphostsdk_CFLAGS";LIBS="$LIBS $vamphostsdk_LIBS";SV_MODULE_FAILED="" + HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $liblo_CFLAGS";LIBS="$LIBS $liblo_LIBS";SV_MODULE_FAILED="" fi fi if test -n "$SV_MODULE_FAILED"; then as_ac_Header=`$as_echo "ac_cv_header_$SV_MODULE_HEADER" | $as_tr_sh` ac_fn_cxx_check_header_mongrel "$LINENO" "$SV_MODULE_HEADER" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - HAVES="$HAVES $SV_MODULE_HAVE" -else - as_fn_error $? 
"Failed to find header $SV_MODULE_HEADER for required module $SV_MODULE_MODULE" "$LINENO" 5 -fi - - - if test -n "$SV_MODULE_LIB"; then - as_ac_Lib=`$as_echo "ac_cv_lib_$SV_MODULE_LIB''_$SV_MODULE_FUNC" | $as_tr_sh` + HAVES="$HAVES $SV_MODULE_HAVE";SV_MODULE_FAILED="" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find header $SV_MODULE_HEADER for optional module $SV_MODULE_MODULE" >&5 +$as_echo "$as_me: Failed to find header $SV_MODULE_HEADER for optional module $SV_MODULE_MODULE" >&6;} +fi + + + if test -z "$SV_MODULE_FAILED"; then + if test -n "$SV_MODULE_LIB"; then + as_ac_Lib=`$as_echo "ac_cv_lib_$SV_MODULE_LIB''_$SV_MODULE_FUNC" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB" >&5 $as_echo_n "checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB... " >&6; } if eval \${$as_ac_Lib+:} false; then : @@ -5521,25 +5510,27 @@ if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : LIBS="$LIBS -l$SV_MODULE_LIB" else - as_fn_error $? "Failed to find library $SV_MODULE_LIB for required module $SV_MODULE_MODULE" "$LINENO" 5 -fi - + { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find library $SV_MODULE_LIB for optional module $SV_MODULE_MODULE" >&5 +$as_echo "$as_me: Failed to find library $SV_MODULE_LIB for optional module $SV_MODULE_MODULE" >&6;} +fi + + fi fi fi -SV_MODULE_MODULE=rubberband -SV_MODULE_VERSION_TEST="rubberband" -SV_MODULE_HEADER=rubberband/RubberBandStretcher.h -SV_MODULE_LIB=rubberband -SV_MODULE_FUNC=rubberband_new -SV_MODULE_HAVE=HAVE_$(echo rubberband | tr 'a-z' 'A-Z') +SV_MODULE_MODULE=portaudio +SV_MODULE_VERSION_TEST="portaudio-2.0 >= 19" +SV_MODULE_HEADER=portaudio.h +SV_MODULE_LIB=portaudio +SV_MODULE_FUNC=Pa_IsFormatSupported +SV_MODULE_HAVE=HAVE_$(echo portaudio | tr 'a-z' 'A-Z') SV_MODULE_FAILED=1 -if test -n "$rubberband_LIBS" ; then +if test -n "$portaudio_LIBS" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&5 $as_echo "$as_me: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&6;} - CXXFLAGS="$CXXFLAGS $rubberband_CFLAGS" - LIBS="$LIBS $rubberband_LIBS" + CXXFLAGS="$CXXFLAGS $portaudio_CFLAGS" + LIBS="$LIBS $portaudio_LIBS" SV_MODULE_FAILED="" fi if test -z "$SV_MODULE_VERSION_TEST" ; then @@ -5548,11 +5539,11 @@ if test -n "$SV_MODULE_FAILED" && test -n "$PKG_CONFIG"; then pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rubberband" >&5 -$as_echo_n "checking for rubberband... " >&6; } - -if test -n "$rubberband_CFLAGS"; then - pkg_cv_rubberband_CFLAGS="$rubberband_CFLAGS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for portaudio" >&5 +$as_echo_n "checking for portaudio... " >&6; } + +if test -n "$portaudio_CFLAGS"; then + pkg_cv_portaudio_CFLAGS="$portaudio_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5560,7 +5551,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_rubberband_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_portaudio_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes @@ -5568,8 +5559,8 @@ else pkg_failed=untried fi -if test -n "$rubberband_LIBS"; then - pkg_cv_rubberband_LIBS="$rubberband_LIBS" +if test -n "$portaudio_LIBS"; then + pkg_cv_portaudio_LIBS="$portaudio_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 @@ -5577,7 +5568,7 @@ ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_rubberband_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` + pkg_cv_portaudio_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -5598,164 +5589,12 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - rubberband_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + portaudio_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` else - rubberband_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` + portaudio_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` fi # Put the nasty error message in config.log where it belongs - echo "$rubberband_PKG_ERRORS" >&5 - - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find required module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} -else - rubberband_CFLAGS=$pkg_cv_rubberband_CFLAGS - rubberband_LIBS=$pkg_cv_rubberband_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $rubberband_CFLAGS";LIBS="$LIBS $rubberband_LIBS";SV_MODULE_FAILED="" -fi -fi -if test -n "$SV_MODULE_FAILED"; then - as_ac_Header=`$as_echo "ac_cv_header_$SV_MODULE_HEADER" | $as_tr_sh` -ac_fn_cxx_check_header_mongrel "$LINENO" "$SV_MODULE_HEADER" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - HAVES="$HAVES $SV_MODULE_HAVE" -else - as_fn_error $? "Failed to find header $SV_MODULE_HEADER for required module $SV_MODULE_MODULE" "$LINENO" 5 -fi - - - if test -n "$SV_MODULE_LIB"; then - as_ac_Lib=`$as_echo "ac_cv_lib_$SV_MODULE_LIB''_$SV_MODULE_FUNC" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB" >&5 -$as_echo_n "checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB... " >&6; } -if eval \${$as_ac_Lib+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-l$SV_MODULE_LIB $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $SV_MODULE_FUNC (); -int -main () -{ -return $SV_MODULE_FUNC (); - ; - return 0; -} -_ACEOF -if ac_fn_cxx_try_link "$LINENO"; then : - eval "$as_ac_Lib=yes" -else - eval "$as_ac_Lib=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -eval ac_res=\$$as_ac_Lib - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : - LIBS="$LIBS -l$SV_MODULE_LIB" -else - as_fn_error $? "Failed to find library $SV_MODULE_LIB for required module $SV_MODULE_MODULE" "$LINENO" 5 -fi - - fi -fi - - - -SV_MODULE_MODULE=liblo -SV_MODULE_VERSION_TEST="" -SV_MODULE_HEADER=lo/lo.h -SV_MODULE_LIB=lo -SV_MODULE_FUNC=lo_address_new -SV_MODULE_HAVE=HAVE_$(echo liblo | tr 'a-z' 'A-Z') -SV_MODULE_FAILED=1 -if test -n "$liblo_LIBS" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&5 -$as_echo "$as_me: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&6;} - CXXFLAGS="$CXXFLAGS $liblo_CFLAGS" - LIBS="$LIBS $liblo_LIBS" - SV_MODULE_FAILED="" -fi -if test -z "$SV_MODULE_VERSION_TEST" ; then - SV_MODULE_VERSION_TEST=$SV_MODULE_MODULE -fi -if test -n "$SV_MODULE_FAILED" && test -n "$PKG_CONFIG"; then - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for liblo" >&5 -$as_echo_n "checking for liblo... " >&6; } - -if test -n "$liblo_CFLAGS"; then - pkg_cv_liblo_CFLAGS="$liblo_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 - ($PKG_CONFIG --exists --print-errors "$SV_MODULE_VERSION_TEST") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_liblo_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$liblo_LIBS"; then - pkg_cv_liblo_LIBS="$liblo_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 - ($PKG_CONFIG --exists --print-errors "$SV_MODULE_VERSION_TEST") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_liblo_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - liblo_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` - else - liblo_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$liblo_PKG_ERRORS" >&5 + echo "$portaudio_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 $as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} @@ -5765,166 +5604,11 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 $as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} else - liblo_CFLAGS=$pkg_cv_liblo_CFLAGS - liblo_LIBS=$pkg_cv_liblo_LIBS + portaudio_CFLAGS=$pkg_cv_portaudio_CFLAGS + portaudio_LIBS=$pkg_cv_portaudio_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } - HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $liblo_CFLAGS";LIBS="$LIBS $liblo_LIBS";SV_MODULE_FAILED="" -fi -fi -if test -n "$SV_MODULE_FAILED"; then - as_ac_Header=`$as_echo "ac_cv_header_$SV_MODULE_HEADER" | $as_tr_sh` -ac_fn_cxx_check_header_mongrel "$LINENO" "$SV_MODULE_HEADER" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - HAVES="$HAVES $SV_MODULE_HAVE";SV_MODULE_FAILED="" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find header $SV_MODULE_HEADER for optional module $SV_MODULE_MODULE" >&5 -$as_echo "$as_me: Failed to find header $SV_MODULE_HEADER for optional module $SV_MODULE_MODULE" >&6;} -fi - - - if test -z "$SV_MODULE_FAILED"; then - if test -n "$SV_MODULE_LIB"; then - as_ac_Lib=`$as_echo "ac_cv_lib_$SV_MODULE_LIB''_$SV_MODULE_FUNC" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB" >&5 -$as_echo_n "checking for $SV_MODULE_FUNC in -l$SV_MODULE_LIB... " >&6; } -if eval \${$as_ac_Lib+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-l$SV_MODULE_LIB $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $SV_MODULE_FUNC (); -int -main () -{ -return $SV_MODULE_FUNC (); - ; - return 0; -} -_ACEOF -if ac_fn_cxx_try_link "$LINENO"; then : - eval "$as_ac_Lib=yes" -else - eval "$as_ac_Lib=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -eval ac_res=\$$as_ac_Lib - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : - LIBS="$LIBS -l$SV_MODULE_LIB" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find library $SV_MODULE_LIB for optional module $SV_MODULE_MODULE" >&5 -$as_echo "$as_me: Failed to find library $SV_MODULE_LIB for optional module $SV_MODULE_MODULE" >&6;} -fi - - fi - fi -fi - - -SV_MODULE_MODULE=portaudio_2_0 -SV_MODULE_VERSION_TEST="portaudio-2.0 >= 19" -SV_MODULE_HEADER=portaudio.h -SV_MODULE_LIB=portaudio -SV_MODULE_FUNC=Pa_IsFormatSupported -SV_MODULE_HAVE=HAVE_$(echo portaudio_2_0 | tr 'a-z' 'A-Z') -SV_MODULE_FAILED=1 -if test -n "$portaudio_2_0_LIBS" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&5 -$as_echo "$as_me: User set ${SV_MODULE_MODULE}_LIBS explicitly, skipping test for $SV_MODULE_MODULE" >&6;} - CXXFLAGS="$CXXFLAGS $portaudio_2_0_CFLAGS" - LIBS="$LIBS $portaudio_2_0_LIBS" - SV_MODULE_FAILED="" -fi -if test -z "$SV_MODULE_VERSION_TEST" ; then - SV_MODULE_VERSION_TEST=$SV_MODULE_MODULE -fi -if test -n "$SV_MODULE_FAILED" && test -n "$PKG_CONFIG"; then - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for portaudio_2_0" >&5 -$as_echo_n "checking for portaudio_2_0... " >&6; } - -if test -n "$portaudio_2_0_CFLAGS"; then - pkg_cv_portaudio_2_0_CFLAGS="$portaudio_2_0_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 - ($PKG_CONFIG --exists --print-errors "$SV_MODULE_VERSION_TEST") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_portaudio_2_0_CFLAGS=`$PKG_CONFIG --cflags "$SV_MODULE_VERSION_TEST" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$portaudio_2_0_LIBS"; then - pkg_cv_portaudio_2_0_LIBS="$portaudio_2_0_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$SV_MODULE_VERSION_TEST\""; } >&5 - ($PKG_CONFIG --exists --print-errors "$SV_MODULE_VERSION_TEST") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_portaudio_2_0_LIBS=`$PKG_CONFIG --libs "$SV_MODULE_VERSION_TEST" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - portaudio_2_0_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` - else - portaudio_2_0_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$SV_MODULE_VERSION_TEST" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$portaudio_2_0_PKG_ERRORS" >&5 - - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&5 -$as_echo "$as_me: Failed to find optional module $SV_MODULE_MODULE using pkg-config, trying again by old-fashioned means" >&6;} -else - portaudio_2_0_CFLAGS=$pkg_cv_portaudio_2_0_CFLAGS - portaudio_2_0_LIBS=$pkg_cv_portaudio_2_0_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $portaudio_2_0_CFLAGS";LIBS="$LIBS $portaudio_2_0_LIBS";SV_MODULE_FAILED="" + HAVES="$HAVES $SV_MODULE_HAVE";CXXFLAGS="$CXXFLAGS $portaudio_CFLAGS";LIBS="$LIBS $portaudio_LIBS";SV_MODULE_FAILED="" fi fi if test -n "$SV_MODULE_FAILED"; then
--- a/configure.ac Sat Jan 30 12:05:14 2016 +0000 +++ b/configure.ac Fri Jan 13 10:29:55 2017 +0000 @@ -83,12 +83,10 @@ SV_MODULE_REQUIRED([fftw3f],[fftw3f >= 3.0.0],[fftw3.h],[fftw3f],[fftwf_execute]) SV_MODULE_REQUIRED([sndfile],[sndfile >= 1.0.16],[sndfile.h],[sndfile],[sf_open]) SV_MODULE_REQUIRED([samplerate],[samplerate >= 0.1.2],[samplerate.h],[samplerate],[src_new]) -SV_MODULE_REQUIRED([vamp],[vamp >= 2.1],[vamp/vamp.h],[],[]) -SV_MODULE_REQUIRED([vamphostsdk],[vamp-hostsdk >= 2.5],[vamp-hostsdk/PluginLoader.h],[vamp-hostsdk],[libvamphostsdk_v_2_5_present]) SV_MODULE_REQUIRED([rubberband],[rubberband],[rubberband/RubberBandStretcher.h],[rubberband],[rubberband_new]) SV_MODULE_OPTIONAL([liblo],[],[lo/lo.h],[lo],[lo_address_new]) -SV_MODULE_OPTIONAL([portaudio_2_0],[portaudio-2.0 >= 19],[portaudio.h],[portaudio],[Pa_IsFormatSupported]) +SV_MODULE_OPTIONAL([portaudio],[portaudio-2.0 >= 19],[portaudio.h],[portaudio],[Pa_IsFormatSupported]) SV_MODULE_OPTIONAL([JACK],[jack >= 0.100],[jack/jack.h],[jack],[jack_client_open]) SV_MODULE_OPTIONAL([libpulse],[libpulse >= 0.9],[pulse/pulseaudio.h],[pulse],[pa_stream_new]) SV_MODULE_OPTIONAL([lrdf],[lrdf >= 0.2],[lrdf.h],[lrdf],[lrdf_init])
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/files.pri Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,28 @@ + +SVAPP_HEADERS += \ + audio/AudioCallbackPlaySource.h \ + audio/AudioCallbackRecordTarget.h \ + audio/AudioGenerator.h \ + audio/ClipMixer.h \ + audio/ContinuousSynth.h \ + audio/PlaySpeedRangeMapper.h \ + framework/Align.h \ + framework/Document.h \ + framework/MainWindowBase.h \ + framework/SVFileReader.h \ + framework/TransformUserConfigurator.h \ + framework/VersionTester.h + +SVAPP_SOURCES += \ + audio/AudioCallbackPlaySource.cpp \ + audio/AudioCallbackRecordTarget.cpp \ + audio/AudioGenerator.cpp \ + audio/ClipMixer.cpp \ + audio/ContinuousSynth.cpp \ + audio/PlaySpeedRangeMapper.cpp \ + framework/Align.cpp \ + framework/Document.cpp \ + framework/MainWindowBase.cpp \ + framework/SVFileReader.cpp \ + framework/TransformUserConfigurator.cpp \ + framework/VersionTester.cpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/framework/Align.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -0,0 +1,310 @@ +/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ + +/* + Sonic Visualiser + An audio file viewer and annotation editor. + Centre for Digital Music, Queen Mary, University of London. + This file copyright 2006 Chris Cannam and QMUL. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. See the file + COPYING included with this distribution for more information. +*/ + +#include "Align.h" + +#include "data/model/WaveFileModel.h" +#include "data/model/ReadOnlyWaveFileModel.h" +#include "data/model/AggregateWaveModel.h" +#include "data/model/RangeSummarisableTimeValueModel.h" +#include "data/model/SparseTimeValueModel.h" +#include "data/model/AlignmentModel.h" + +#include "data/fileio/CSVFileReader.h" + +#include "transform/TransformFactory.h" +#include "transform/ModelTransformerFactory.h" +#include "transform/FeatureExtractionModelTransformer.h" + +#include <QProcess> +#include <QSettings> +#include <QApplication> + +bool +Align::alignModel(Model *ref, Model *other) +{ + QSettings settings; + settings.beginGroup("Preferences"); + bool useProgram = settings.value("use-external-alignment", false).toBool(); + QString program = settings.value("external-alignment-program", "").toString(); + settings.endGroup(); + + if (useProgram && (program != "")) { + return alignModelViaProgram(ref, other, program); + } else { + return alignModelViaTransform(ref, other); + } +} + +QString +Align::getAlignmentTransformName() +{ + QSettings settings; + settings.beginGroup("Alignment"); + TransformId id = + settings.value("transform-id", + "vamp:match-vamp-plugin:match:path").toString(); + settings.endGroup(); + return id; +} + +bool +Align::canAlign() +{ + TransformId id = getAlignmentTransformName(); + TransformFactory *factory = TransformFactory::getInstance(); + return factory->haveTransform(id); +} + +bool +Align::alignModelViaTransform(Model *ref, Model *other) +{ + RangeSummarisableTimeValueModel *reference = qobject_cast + <RangeSummarisableTimeValueModel *>(ref); + + RangeSummarisableTimeValueModel *rm = qobject_cast + <RangeSummarisableTimeValueModel *>(other); + + if (!reference || !rm) return false; // but this should have been tested already + + // This involves creating three new models: + + // 1. an AggregateWaveModel to provide the mixdowns of the main + // model and the new model in its two channels, as input to the + // MATCH plugin + + // 2. a SparseTimeValueModel, which is the model automatically + // created by FeatureExtractionPluginTransformer when running the + // MATCH plugin (thus containing the alignment path) + + // 3. an AlignmentModel, which stores the path model and carries + // out alignment lookups on it. + + // The first two of these are provided as arguments to the + // constructor for the third, which takes responsibility for + // deleting them. The AlignmentModel, meanwhile, is passed to the + // new model we are aligning, which also takes responsibility for + // it. We should not have to delete any of these new models here. 
+ + AggregateWaveModel::ChannelSpecList components; + + components.push_back(AggregateWaveModel::ModelChannelSpec + (reference, -1)); + + components.push_back(AggregateWaveModel::ModelChannelSpec + (rm, -1)); + + Model *aggregateModel = new AggregateWaveModel(components); + ModelTransformer::Input aggregate(aggregateModel); + + TransformId id = getAlignmentTransformName(); + + TransformFactory *tf = TransformFactory::getInstance(); + + Transform transform = tf->getDefaultTransformFor + (id, aggregateModel->getSampleRate()); + + transform.setStepSize(transform.getBlockSize()/2); + transform.setParameter("serialise", 1); + transform.setParameter("smooth", 0); + + SVDEBUG << "Align::alignModel: Alignment transform step size " << transform.getStepSize() << ", block size " << transform.getBlockSize() << endl; + + ModelTransformerFactory *mtf = ModelTransformerFactory::getInstance(); + + QString message; + Model *transformOutput = mtf->transform(transform, aggregate, message); + + if (!transformOutput) { + transform.setStepSize(0); + transformOutput = mtf->transform(transform, aggregate, message); + } + + SparseTimeValueModel *path = dynamic_cast<SparseTimeValueModel *> + (transformOutput); + + if (!path) { + cerr << "Align::alignModel: ERROR: Failed to create alignment path (no MATCH plugin?)" << endl; + delete transformOutput; + delete aggregateModel; + m_error = message; + return false; + } + + path->setCompletion(0); + + AlignmentModel *alignmentModel = new AlignmentModel + (reference, other, aggregateModel, path); + + connect(alignmentModel, SIGNAL(completionChanged()), + this, SLOT(alignmentCompletionChanged())); + + rm->setAlignment(alignmentModel); + + return true; +} + +void +Align::alignmentCompletionChanged() +{ + AlignmentModel *am = qobject_cast<AlignmentModel *>(sender()); + if (!am) return; + if (am->isReady()) { + disconnect(am, SIGNAL(completionChanged()), + this, SLOT(alignmentCompletionChanged())); + emit alignmentComplete(am); + } +} + +bool +Align::alignModelViaProgram(Model *ref, Model *other, QString program) +{ + WaveFileModel *reference = qobject_cast<WaveFileModel *>(ref); + WaveFileModel *rm = qobject_cast<WaveFileModel *>(other); + + if (!reference || !rm) { + return false; // but this should have been tested already + } + + while (!reference->isReady(0) || !rm->isReady(0)) { + qApp->processEvents(); + } + + // Run an external program, passing to it paths to the main + // model's audio file and the new model's audio file. It returns + // the path in CSV form through stdout. 
+ + ReadOnlyWaveFileModel *roref = qobject_cast<ReadOnlyWaveFileModel *>(reference); + ReadOnlyWaveFileModel *rorm = qobject_cast<ReadOnlyWaveFileModel *>(rm); + if (!roref || !rorm) { + cerr << "ERROR: Align::alignModelViaProgram: Can't align non-read-only models via program (no local filename available)" << endl; + return false; + } + + QString refPath = roref->getLocalFilename(); + QString otherPath = rorm->getLocalFilename(); + + if (refPath == "" || otherPath == "") { + m_error = "Failed to find local filepath for wave-file model"; + return false; + } + + m_error = ""; + + AlignmentModel *alignmentModel = new AlignmentModel(reference, other, 0, 0); + rm->setAlignment(alignmentModel); + + QProcess *process = new QProcess; + QStringList args; + args << refPath << otherPath; + + connect(process, SIGNAL(finished(int, QProcess::ExitStatus)), + this, SLOT(alignmentProgramFinished(int, QProcess::ExitStatus))); + + m_processModels[process] = alignmentModel; + process->start(program, args); + + bool success = process->waitForStarted(); + + if (!success) { + cerr << "ERROR: Align::alignModelViaProgram: Program did not start" + << endl; + m_error = "Alignment program could not be started"; + m_processModels.erase(process); + rm->setAlignment(0); // deletes alignmentModel as well + delete process; + } + + return success; +} + +void +Align::alignmentProgramFinished(int exitCode, QProcess::ExitStatus status) +{ + cerr << "Align::alignmentProgramFinished" << endl; + + QProcess *process = qobject_cast<QProcess *>(sender()); + + if (m_processModels.find(process) == m_processModels.end()) { + cerr << "ERROR: Align::alignmentProgramFinished: Process " << process + << " not found in process model map!" << endl; + return; + } + + AlignmentModel *alignmentModel = m_processModels[process]; + + if (exitCode == 0 && status == 0) { + + CSVFormat format; + format.setModelType(CSVFormat::TwoDimensionalModel); + format.setTimingType(CSVFormat::ExplicitTiming); + format.setTimeUnits(CSVFormat::TimeSeconds); + format.setColumnCount(2); + // The output format has time in the reference file first, and + // time in the "other" file in the second column. This is a + // more natural approach for a command-line alignment tool, + // but it's the opposite of what we expect for native + // alignment paths, which map from "other" file to + // reference. These column purpose settings reflect that. 
+ format.setColumnPurpose(1, CSVFormat::ColumnStartTime); + format.setColumnPurpose(0, CSVFormat::ColumnValue); + format.setAllowQuoting(false); + format.setSeparator(','); + + CSVFileReader reader(process, format, alignmentModel->getSampleRate()); + if (!reader.isOK()) { + cerr << "ERROR: Align::alignmentProgramFinished: Failed to parse output" + << endl; + m_error = QString("Failed to parse output of program: %1") + .arg(reader.getError()); + goto done; + } + + Model *csvOutput = reader.load(); + + SparseTimeValueModel *path = qobject_cast<SparseTimeValueModel *>(csvOutput); + if (!path) { + cerr << "ERROR: Align::alignmentProgramFinished: Output did not convert to sparse time-value model" + << endl; + m_error = QString("Output of program did not produce sparse time-value model"); + goto done; + } + + if (path->getPoints().empty()) { + cerr << "ERROR: Align::alignmentProgramFinished: Output contained no mappings" + << endl; + m_error = QString("Output of alignment program contained no mappings"); + goto done; + } + + cerr << "Align::alignmentProgramFinished: Setting alignment path (" + << path->getPoints().size() << " point(s))" << endl; + + alignmentModel->setPathFrom(path); + + emit alignmentComplete(alignmentModel); + + } else { + cerr << "ERROR: Align::alignmentProgramFinished: Aligner program " + << "failed: exit code " << exitCode << ", status " << status + << endl; + m_error = "Aligner process returned non-zero exit status"; + } + +done: + m_processModels.erase(process); + delete process; +} +
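The contract for the external alignment program appears only in the comments above: it is invoked with the reference audio file path and the "other" audio file path as its two arguments, and it must write comma-separated rows to stdout, reference-file time in the first column and the corresponding "other"-file time in the second, in seconds. The following is a minimal sketch of a program satisfying that contract; the file name, the fixed 60-second span and the identity mapping are illustrative assumptions for the sketch only, not anything shipped with Sonic Visualiser.

// identity-align.cpp
//
// Minimal stand-in for the external alignment program expected by
// Align::alignModelViaProgram. Invoked as:
//   identity-align <reference-audio-path> <other-audio-path>
// It must print "<reference time>,<other time>" rows (seconds) to stdout.
// This sketch emits an identity mapping at one-second intervals over an
// assumed 60-second duration; a real aligner would derive the mapping
// from the audio content of the two files.

#include <cstdio>

int main(int argc, char **argv)
{
    if (argc != 3) {
        std::fprintf(stderr, "usage: %s <reference> <other>\n", argv[0]);
        return 1;
    }
    for (int sec = 0; sec <= 60; ++sec) {
        // reference-file time first, then the corresponding "other"-file time
        std::printf("%d,%d\n", sec, sec);
    }
    return 0;
}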
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/framework/Align.h	Fri Jan 13 10:29:55 2017 +0000
@@ -0,0 +1,83 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version. See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef ALIGN_H
+#define ALIGN_H
+
+#include <QString>
+#include <QObject>
+#include <QProcess>
+#include <map>
+
+class Model;
+class AlignmentModel;
+
+class Align : public QObject
+{
+    Q_OBJECT
+
+public:
+    Align() : m_error("") { }
+
+    /**
+     * Align the "other" model to the reference, attaching an
+     * AlignmentModel to it. Alignment is carried out by the method
+     * configured in the user preferences (either a plugin transform
+     * or an external process) and is done asynchronously.
+     *
+     * A single Align object may carry out many simultaneous alignment
+     * calls -- you do not need to create a new Align object each
+     * time, nor to wait for an alignment to be complete before
+     * starting a new one.
+     *
+     * The Align object must survive after this call, for at least as
+     * long as the alignment takes. The usual expectation is that the
+     * Align object will simply share the process or document
+     * lifespan.
+     */
+    bool alignModel(Model *reference, Model *other); // via user preference
+
+    bool alignModelViaTransform(Model *reference, Model *other);
+    bool alignModelViaProgram(Model *reference, Model *other, QString program);
+
+    /**
+     * Return true if the alignment facility is available (relevant
+     * plugin installed, etc).
+     */
+    static bool canAlign();
+
+    QString getError() const { return m_error; }
+
+signals:
+    /**
+     * Emitted when an alignment is successfully completed. The
+     * reference and other models can be queried from the alignment
+     * model.
+     */
+    void alignmentComplete(AlignmentModel *alignment);
+
+private slots:
+    void alignmentCompletionChanged();
+    void alignmentProgramFinished(int, QProcess::ExitStatus);
+
+private:
+    static QString getAlignmentTransformName();
+
+    QString m_error;
+    std::map<QProcess *, AlignmentModel *> m_processModels;
+};
+
+#endif
+
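For orientation, here is a sketch of how a client is expected to drive this class, modelled on the Document changes below: the owner keeps one long-lived Align object, calls alignModel() (which returns as soon as the alignment has been started), and listens for alignmentComplete(). The class name AlignmentOwner and its members are illustrative only, not part of svapp.

// Sketch of an Align client, assuming the Align API declared above.

#include "Align.h"

#include <iostream>

class AlignmentOwner : public QObject
{
    Q_OBJECT

public:
    AlignmentOwner() : m_align(new Align()) {
        // alignModel() is asynchronous; completion arrives via this signal
        connect(m_align, SIGNAL(alignmentComplete(AlignmentModel *)),
                this, SLOT(alignmentDone(AlignmentModel *)));
    }
    ~AlignmentOwner() { delete m_align; }

    void alignToReference(Model *reference, Model *other) {
        if (!Align::canAlign()) return; // e.g. no MATCH plugin installed
        if (!m_align->alignModel(reference, other)) {
            std::cerr << "Alignment failed to start: "
                      << m_align->getError().toStdString() << std::endl;
        }
    }

private slots:
    void alignmentDone(AlignmentModel *am) {
        // The reference and "other" models can be queried from am; the
        // aligned model already owns am via setAlignment()
        std::cerr << "Alignment complete: " << am << std::endl;
    }

private:
    Align *m_align; // one Align object can serve many concurrent alignments
};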
--- a/framework/Document.cpp Sat Jan 30 12:05:14 2016 +0000 +++ b/framework/Document.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -15,6 +15,8 @@ #include "Document.h" +#include "Align.h" + #include "data/model/WaveFileModel.h" #include "data/model/WritableWaveFileModel.h" #include "data/model/DenseThreeDimensionalModel.h" @@ -36,10 +38,8 @@ #include <iostream> #include <typeinfo> -// For alignment: -#include "data/model/AggregateWaveModel.h" -#include "data/model/SparseTimeValueModel.h" #include "data/model/AlignmentModel.h" +#include "Align.h" using std::vector; @@ -49,7 +49,8 @@ Document::Document() : m_mainModel(0), - m_autoAlignment(false) + m_autoAlignment(false), + m_align(new Align()) { connect(this, SIGNAL(modelAboutToBeDeleted(Model *)), @@ -60,6 +61,9 @@ SIGNAL(transformFailed(QString, QString)), this, SIGNAL(modelGenerationFailed(QString, QString))); + + connect(m_align, SIGNAL(alignmentComplete(AlignmentModel *)), + this, SIGNAL(alignmentComplete(AlignmentModel *))); } Document::~Document() @@ -736,6 +740,10 @@ // remember is correct for what was actually applied, with the // current plugin version. + //!!! would be nice to short-circuit this -- the version is + //!!! static data, shouldn't have to construct a plugin for it + //!!! (which may be expensive in Piper-world) + Transform applied = transforms[j]; applied.setPluginVersion (TransformFactory::getInstance()-> @@ -1038,24 +1046,10 @@ return (m_models.find(const_cast<Model *>(model)) != m_models.end()); } -TransformId -Document::getAlignmentTransformName() +bool +Document::canAlign() { - QSettings settings; - settings.beginGroup("Alignment"); - TransformId id = - settings.value("transform-id", - "vamp:match-vamp-plugin:match:path").toString(); - settings.endGroup(); - return id; -} - -bool -Document::canAlign() -{ - TransformId id = getAlignmentTransformName(); - TransformFactory *factory = TransformFactory::getInstance(); - return factory->haveTransform(id); + return Align::canAlign(); } void @@ -1090,75 +1084,10 @@ return; } - // This involves creating three new models: - - // 1. an AggregateWaveModel to provide the mixdowns of the main - // model and the new model in its two channels, as input to the - // MATCH plugin - - // 2. a SparseTimeValueModel, which is the model automatically - // created by FeatureExtractionPluginTransformer when running the - // MATCH plugin (thus containing the alignment path) - - // 3. an AlignmentModel, which stores the path model and carries - // out alignment lookups on it. - - // The first two of these are provided as arguments to the - // constructor for the third, which takes responsibility for - // deleting them. The AlignmentModel, meanwhile, is passed to the - // new model we are aligning, which also takes responsibility for - // it. We should not have to delete any of these new models here. - - AggregateWaveModel::ChannelSpecList components; - - components.push_back(AggregateWaveModel::ModelChannelSpec - (m_mainModel, -1)); - - components.push_back(AggregateWaveModel::ModelChannelSpec - (rm, -1)); - - Model *aggregateModel = new AggregateWaveModel(components); - ModelTransformer::Input aggregate(aggregateModel); - - TransformId id = "vamp:match-vamp-plugin:match:path"; //!!! 
configure - - TransformFactory *tf = TransformFactory::getInstance(); - - Transform transform = tf->getDefaultTransformFor - (id, aggregateModel->getSampleRate()); - - transform.setStepSize(transform.getBlockSize()/2); - transform.setParameter("serialise", 1); - - SVDEBUG << "Document::alignModel: Alignment transform step size " << transform.getStepSize() << ", block size " << transform.getBlockSize() << endl; - - ModelTransformerFactory *mtf = ModelTransformerFactory::getInstance(); - - QString message; - Model *transformOutput = mtf->transform(transform, aggregate, message); - - if (!transformOutput) { - transform.setStepSize(0); - transformOutput = mtf->transform(transform, aggregate, message); + if (!m_align->alignModel(m_mainModel, rm)) { + cerr << "Alignment failed: " << m_align->getError() << endl; + emit alignmentFailed(m_align->getError()); } - - SparseTimeValueModel *path = dynamic_cast<SparseTimeValueModel *> - (transformOutput); - - if (!path) { - cerr << "Document::alignModel: ERROR: Failed to create alignment path (no MATCH plugin?)" << endl; - emit alignmentFailed(id, message); - delete transformOutput; - delete aggregateModel; - return; - } - - path->setCompletion(0); - - AlignmentModel *alignmentModel = new AlignmentModel - (m_mainModel, model, aggregateModel, path); - - rm->setAlignment(alignmentModel); } void
--- a/framework/Document.h Sat Jan 30 12:05:14 2016 +0000 +++ b/framework/Document.h Fri Jan 13 10:29:55 2017 +0000 @@ -32,6 +32,8 @@ class AdditionalModelConverter; +class Align; + /** * A Sonic Visualiser document consists of a set of data models, and * also the visualisation layers used to display them. Changes to the @@ -301,7 +303,9 @@ QString message); void modelRegenerationWarning(QString layerName, QString transformName, QString message); - void alignmentFailed(QString transformName, QString message); + + void alignmentComplete(AlignmentModel *); + void alignmentFailed(QString message); void activity(QString); @@ -407,8 +411,6 @@ void writeBackwardCompatibleDerivation(QTextStream &, QString, Model *, const ModelRecord &) const; - static TransformId getAlignmentTransformName(); - void toXml(QTextStream &, QString, QString, bool asTemplate) const; void writePlaceholderMainModel(QTextStream &, QString) const; @@ -423,6 +425,7 @@ LayerSet m_layers; bool m_autoAlignment; + Align *m_align; }; #endif
--- a/framework/MainWindowBase.cpp Sat Jan 30 12:05:14 2016 +0000 +++ b/framework/MainWindowBase.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -16,10 +16,10 @@ #include "MainWindowBase.h" #include "Document.h" - #include "view/Pane.h" #include "view/PaneStack.h" -#include "data/model/WaveFileModel.h" +#include "data/model/ReadOnlyWaveFileModel.h" +#include "data/model/WritableWaveFileModel.h" #include "data/model/SparseOneDimensionalModel.h" #include "data/model/NoteModel.h" #include "data/model/FlexiNoteModel.h" @@ -47,10 +47,10 @@ #include "widgets/ModelDataTableDialog.h" #include "widgets/InteractiveFileFinder.h" -#include "audioio/AudioCallbackPlaySource.h" -#include "audioio/AudioCallbackPlayTarget.h" -#include "audioio/AudioTargetFactory.h" -#include "audioio/PlaySpeedRangeMapper.h" +#include "audio/AudioCallbackPlaySource.h" +#include "audio/AudioCallbackRecordTarget.h" +#include "audio/PlaySpeedRangeMapper.h" + #include "data/fileio/DataFileReaderFactory.h" #include "data/fileio/PlaylistFileReader.h" #include "data/fileio/WavFileWriter.h" @@ -60,8 +60,6 @@ #include "data/fileio/AudioFileReaderFactory.h" #include "rdf/RDFImporter.h" -#include "data/fft/FFTDataServer.h" - #include "base/RecentFiles.h" #include "base/PlayParameterRepository.h" @@ -75,6 +73,11 @@ #include "data/osc/OSCQueue.h" #include "data/midi/MIDIInput.h" +#include <bqaudioio/SystemPlaybackTarget.h> +#include <bqaudioio/SystemAudioIO.h> +#include <bqaudioio/AudioFactory.h> +#include <bqaudioio/ResamplerWrapper.h> + #include <QApplication> #include <QMessageBox> #include <QGridLayout> @@ -131,15 +134,17 @@ #undef Window #endif -MainWindowBase::MainWindowBase(bool withAudioOutput, - bool withMIDIInput) : +MainWindowBase::MainWindowBase(SoundOptions options) : m_document(0), m_paneStack(0), m_viewManager(0), m_timeRulerLayer(0), - m_audioOutput(withAudioOutput), + m_soundOptions(options), m_playSource(0), + m_recordTarget(0), + m_resamplerWrapper(0), m_playTarget(0), + m_audioIO(0), m_oscQueue(0), m_oscQueueStarter(0), m_midiInput(0), @@ -152,11 +157,19 @@ m_lastPlayStatusSec(0), m_initialDarkBackground(false), m_defaultFfwdRwdStep(2, 0), + m_audioRecordMode(RecordCreateAdditionalModel), m_statusLabel(0), + m_iconsVisibleInMenus(true), m_menuShortcutMapper(0) { Profiler profiler("MainWindowBase::MainWindowBase"); + if (options & WithAudioInput) { + if (!(options & WithAudioOutput)) { + cerr << "WARNING: MainWindowBase: WithAudioInput requires WithAudioOutput -- recording will not work" << endl; + } + } + qRegisterMetaType<sv_frame_t>("sv_frame_t"); qRegisterMetaType<sv_samplerate_t>("sv_samplerate_t"); @@ -186,6 +199,7 @@ settings.setValue("view-font-size", viewFontSize); settings.endGroup(); +#ifdef NOT_DEFINED // This no longer works correctly on any platform AFAICS Preferences::BackgroundMode mode = Preferences::getInstance()->getBackgroundMode(); m_initialDarkBackground = m_viewManager->getGlobalDarkBackground(); @@ -193,6 +207,7 @@ m_viewManager->setGlobalDarkBackground (mode == Preferences::DarkBackground); } +#endif m_paneStack = new PaneStack(0, m_viewManager); connect(m_paneStack, SIGNAL(currentPaneChanged(Pane *)), @@ -215,19 +230,30 @@ this, SLOT(paneDropAccepted(Pane *, QString))); connect(m_paneStack, SIGNAL(paneDeleteButtonClicked(Pane *)), this, SLOT(paneDeleteButtonClicked(Pane *))); - - m_playSource = new AudioCallbackPlaySource(m_viewManager, - QApplication::applicationName()); + + m_playSource = new AudioCallbackPlaySource + (m_viewManager, QApplication::applicationName()); + + if (m_soundOptions & 
WithAudioInput) { + m_recordTarget = new AudioCallbackRecordTarget + (m_viewManager, QApplication::applicationName()); + connect(m_recordTarget, + SIGNAL(recordDurationChanged(sv_frame_t, sv_samplerate_t)), + this, + SLOT(recordDurationChanged(sv_frame_t, sv_samplerate_t))); + } connect(m_playSource, SIGNAL(sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool)), this, SLOT(sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool))); + connect(m_playSource, SIGNAL(channelCountIncreased(int)), + this, SLOT(audioChannelCountIncreased(int))); connect(m_playSource, SIGNAL(audioOverloadPluginDisabled()), this, SLOT(audioOverloadPluginDisabled())); connect(m_playSource, SIGNAL(audioTimeStretchMultiChannelDisabled()), this, SLOT(audioTimeStretchMultiChannelDisabled())); - connect(m_viewManager, SIGNAL(outputLevelsChanged(float, float)), - this, SLOT(outputLevelsChanged(float, float))); + connect(m_viewManager, SIGNAL(monitoringLevelsChanged(float, float)), + this, SLOT(monitoringLevelsChanged(float, float))); connect(m_viewManager, SIGNAL(playbackFrameChanged(sv_frame_t)), this, SLOT(playbackFrameChanged(sv_frame_t))); @@ -258,7 +284,7 @@ m_labeller = new Labeller(labellerType); m_labeller->setCounterCycleSize(cycle); - if (withMIDIInput) { + if (m_soundOptions & WithMIDIInput) { m_midiInput = new MIDIInput(QApplication::applicationName(), this); } @@ -268,9 +294,19 @@ MainWindowBase::~MainWindowBase() { SVDEBUG << "MainWindowBase::~MainWindowBase" << endl; - if (m_playTarget) m_playTarget->shutdown(); -// delete m_playTarget; + + // We have to delete the breakfastquay::SystemPlaybackTarget or + // breakfastquay::SystemAudioIO object (whichever we have -- it + // depends on whether we handle recording or not) before we delete + // the ApplicationPlaybackSource and ApplicationRecordTarget that + // they refer to. + + deleteAudioIO(); + + // Then delete the Application objects. 
delete m_playSource; + delete m_recordTarget; + delete m_viewManager; delete m_oscQueue; delete m_oscQueueStarter; @@ -315,12 +351,12 @@ } void -MainWindowBase::finaliseMenu(QMenu * -#ifdef Q_OS_MAC - menu -#endif - ) +MainWindowBase::finaliseMenu(QMenu *menu) { + foreach (QAction *a, menu->actions()) { + a->setIconVisibleInMenu(m_iconsVisibleInMenus); + } + #ifdef Q_OS_MAC // See https://bugreports.qt-project.org/browse/QTBUG-38256 and // our issue #890 http://code.soundsoftware.ac.uk/issues/890 -- @@ -447,7 +483,7 @@ QTimer *oscTimer = new QTimer(this); connect(oscTimer, SIGNAL(timeout()), this, SLOT(pollOSC())); oscTimer->start(1000); - cerr << "Finished setting up OSC interface" << endl; + SVCERR << "Finished setting up OSC interface" << endl; } } @@ -552,7 +588,7 @@ bool haveMainModel = (getMainModel() != 0); bool havePlayTarget = - (m_playTarget != 0); + (m_playTarget != 0 || m_audioIO != 0); bool haveSelection = (m_viewManager && !m_viewManager->getSelections().empty()); @@ -597,6 +633,7 @@ emit canMeasureLayer(haveCurrentLayer); emit canSelect(haveMainModel && haveCurrentPane); emit canPlay(haveMainModel && havePlayTarget); + emit canRecord(m_recordTarget != 0); emit canFfwd(haveMainModel); emit canRewind(haveMainModel); emit canPaste(haveClipboardContents); @@ -604,6 +641,8 @@ emit canInsertInstantsAtBoundaries(haveCurrentPane && haveSelection); emit canInsertItemAtSelection(haveCurrentPane && haveSelection && haveCurrentDurationLayer); emit canRenumberInstants(haveCurrentTimeInstantsLayer && haveSelection); + emit canSubdivideInstants(haveCurrentTimeInstantsLayer && haveSelection); + emit canWinnowInstants(haveCurrentTimeInstantsLayer && haveSelection); emit canPlaySelection(haveMainModel && havePlayTarget && haveSelection); emit canClearSelection(haveSelection); emit canEditSelection(haveSelection && haveCurrentEditableLayer); @@ -1196,9 +1235,60 @@ Labeller labeller(*m_labeller); labeller.setSampleRate(sodm->getSampleRate()); - // This uses a command - - labeller.labelAll<SparseOneDimensionalModel::Point>(*sodm, &ms); + Command *c = labeller.labelAll<SparseOneDimensionalModel::Point>(*sodm, &ms); + if (c) CommandHistory::getInstance()->addCommand(c, false); +} + +void +MainWindowBase::subdivideInstantsBy(int n) +{ + Pane *pane = m_paneStack->getCurrentPane(); + if (!pane) return; + + Layer *layer = dynamic_cast<TimeInstantLayer *>(pane->getSelectedLayer()); + if (!layer) return; + + MultiSelection ms(m_viewManager->getSelection()); + + Model *model = layer->getModel(); + SparseOneDimensionalModel *sodm = + dynamic_cast<SparseOneDimensionalModel *>(model); + if (!sodm) return; + + if (!m_labeller) return; + + Labeller labeller(*m_labeller); + labeller.setSampleRate(sodm->getSampleRate()); + + Command *c = labeller.subdivide<SparseOneDimensionalModel::Point> + (*sodm, &ms, n); + if (c) CommandHistory::getInstance()->addCommand(c, false); +} + +void +MainWindowBase::winnowInstantsBy(int n) +{ + Pane *pane = m_paneStack->getCurrentPane(); + if (!pane) return; + + Layer *layer = dynamic_cast<TimeInstantLayer *>(pane->getSelectedLayer()); + if (!layer) return; + + MultiSelection ms(m_viewManager->getSelection()); + + Model *model = layer->getModel(); + SparseOneDimensionalModel *sodm = + dynamic_cast<SparseOneDimensionalModel *>(model); + if (!sodm) return; + + if (!m_labeller) return; + + Labeller labeller(*m_labeller); + labeller.setSampleRate(sodm->getSampleRate()); + + Command *c = labeller.winnow<SparseOneDimensionalModel::Point> + (*sodm, &ms, n); + if (c) 
CommandHistory::getInstance()->addCommand(c, false); } MainWindowBase::FileOpenStatus @@ -1241,43 +1331,52 @@ } } - if (rdf) { - if (rdfSession) { - bool cancel = false; - if (!canImportLayer || shouldCreateNewSessionForRDFAudio(&cancel)) { - return openSession(source); - } else if (cancel) { - return FileOpenCancelled; + try { + if (rdf) { + if (rdfSession) { + bool cancel = false; + if (!canImportLayer || shouldCreateNewSessionForRDFAudio(&cancel)) { + return openSession(source); + } else if (cancel) { + return FileOpenCancelled; + } else { + return openLayer(source); + } } else { - return openLayer(source); - } - } else { - if ((status = openSession(source)) != FileOpenFailed) { - return status; - } else if (!canImportLayer) { - return FileOpenWrongMode; - } else if ((status = openLayer(source)) != FileOpenFailed) { - return status; - } else { - return FileOpenFailed; + if ((status = openSession(source)) != FileOpenFailed) { + return status; + } else if (!canImportLayer) { + return FileOpenWrongMode; + } else if ((status = openLayer(source)) != FileOpenFailed) { + return status; + } else { + return FileOpenFailed; + } } } - } - - if (audio && (status = openAudio(source, mode)) != FileOpenFailed) { - return status; - } else if ((status = openSession(source)) != FileOpenFailed) { - return status; - } else if ((status = openPlaylist(source, mode)) != FileOpenFailed) { - return status; - } else if (!canImportLayer) { - return FileOpenWrongMode; - } else if ((status = openImage(source)) != FileOpenFailed) { - return status; - } else if ((status = openLayer(source)) != FileOpenFailed) { - return status; - } else { - return FileOpenFailed; + + if (audio && (status = openAudio(source, mode)) != FileOpenFailed) { + return status; + } else if ((status = openSession(source)) != FileOpenFailed) { + return status; + } else if ((status = openPlaylist(source, mode)) != FileOpenFailed) { + return status; + } else if (!canImportLayer) { + return FileOpenWrongMode; + } else if ((status = openImage(source)) != FileOpenFailed) { + return status; + } else if ((status = openLayer(source)) != FileOpenFailed) { + return status; + } else { + return FileOpenFailed; + } + } catch (const InsufficientDiscSpace &e) { + emit hideSplash(); + m_openingAudioFile = false; + QMessageBox::critical + (this, tr("Not enough disc space"), + tr("<b>Not enough disc space</b><p>There doesn't appear to be enough spare disc space to accommodate any necessary temporary files.</p><p>Please clear some space and try again.</p>").arg(e.what())); + return FileOpenFailed; } } @@ -1289,6 +1388,7 @@ if (templateName == "") { templateName = getDefaultSessionTemplate(); + SVDEBUG << "(Default template is: \"" << templateName << "\")" << endl; } // cerr << "template is: \"" << templateName << "\"" << endl; @@ -1310,10 +1410,12 @@ if (Preferences::getInstance()->getFixedSampleRate() != 0) { rate = Preferences::getInstance()->getFixedSampleRate(); } else if (Preferences::getInstance()->getResampleOnLoad()) { - rate = m_playSource->getSourceSampleRate(); + if (getMainModel()) { + rate = getMainModel()->getSampleRate(); + } } - WaveFileModel *newModel = new WaveFileModel(source, rate); + ReadOnlyWaveFileModel *newModel = new ReadOnlyWaveFileModel(source, rate); if (!newModel->isOK()) { delete newModel; @@ -1407,11 +1509,11 @@ if (templateName != "") { FileOpenStatus tplStatus = openSessionTemplate(templateName); if (tplStatus == FileOpenCancelled) { - cerr << "Template load cancelled" << endl; + SVDEBUG << "Template load cancelled" << endl; return 
FileOpenCancelled; } if (tplStatus != FileOpenFailed) { - cerr << "Template load succeeded" << endl; + SVDEBUG << "Template load succeeded" << endl; loadedTemplate = true; } } @@ -1466,6 +1568,8 @@ } else if (mode == CreateAdditionalModel) { + SVCERR << "Mode is CreateAdditionalModel" << endl; + CommandHistory::getInstance()->startCompoundOperation (tr("Import \"%1\"").arg(source.getBasename()), true); @@ -1477,7 +1581,10 @@ Pane *pane = command->getPane(); if (m_timeRulerLayer) { + SVCERR << "Have time ruler, adding it" << endl; m_document->addLayerToView(pane, m_timeRulerLayer); + } else { + SVCERR << "Do not have time ruler" << endl; } Layer *newLayer = m_document->createImportedLayer(newModel); @@ -1767,6 +1874,51 @@ } MainWindowBase::FileOpenStatus +MainWindowBase::openDirOfAudio(QString dirPath) +{ + QDir dir(dirPath); + QStringList files = dir.entryList(QDir::Files | QDir::Readable); + files.sort(); + + FileOpenStatus status = FileOpenFailed; + bool first = true; + bool cancelled = false; + + foreach (QString file, files) { + + FileSource source(dir.filePath(file)); + if (!source.isAvailable()) { + continue; + } + + if (AudioFileReaderFactory::getKnownExtensions().contains + (source.getExtension().toLower())) { + + AudioFileOpenMode mode = CreateAdditionalModel; + if (first) mode = ReplaceSession; + + switch (openAudio(source, mode)) { + case FileOpenSucceeded: + status = FileOpenSucceeded; + first = false; + break; + case FileOpenFailed: + break; + case FileOpenCancelled: + cancelled = true; + break; + case FileOpenWrongMode: + break; + } + } + + if (cancelled) break; + } + + return status; +} + +MainWindowBase::FileOpenStatus MainWindowBase::openSessionPath(QString fileOrUrl) { ProgressDialog dialog(tr("Opening session..."), true, 2000, this); @@ -1876,6 +2028,7 @@ if (!source.isRemote()) m_sessionFile = source.getLocalFilename(); setupMenus(); + findTimeRulerLayer(); CommandHistory::getInstance()->clear(); CommandHistory::getInstance()->documentSaved(); @@ -1968,6 +2121,7 @@ emit activity(tr("Open session template \"%1\"").arg(source.getLocation())); setupMenus(); + findTimeRulerLayer(); CommandHistory::getInstance()->clear(); CommandHistory::getInstance()->documentSaved(); @@ -1998,6 +2152,7 @@ FileOpenStatus status = openLayersFromRDF(source); setupMenus(); + findTimeRulerLayer(); setWindowTitle(tr("%1: %2") .arg(QApplication::applicationName()) @@ -2024,7 +2179,9 @@ if (getMainModel()) { rate = getMainModel()->getSampleRate(); } else if (Preferences::getInstance()->getResampleOnLoad()) { - rate = m_playSource->getSourceSampleRate(); + if (getMainModel()) { + rate = getMainModel()->getSampleRate(); + } } RDFImporter importer @@ -2160,36 +2317,138 @@ } void -MainWindowBase::createPlayTarget() +MainWindowBase::createAudioIO() { - if (m_playTarget) return; + if (m_playTarget || m_audioIO) return; + + if (!(m_soundOptions & WithAudioOutput)) return; QSettings settings; settings.beginGroup("Preferences"); - QString targetName = settings.value("audio-target", "").toString(); + QString implementation = settings.value + ("audio-target", "").toString(); + QString suffix; + if (implementation != "") suffix = "-" + implementation; + QString recordDevice = settings.value + ("audio-record-device" + suffix, "").toString(); + QString playbackDevice = settings.value + ("audio-playback-device" + suffix, "").toString(); settings.endGroup(); - AudioTargetFactory *factory = AudioTargetFactory::getInstance(); - - factory->setDefaultCallbackTarget(targetName); - m_playTarget = 
factory->createCallbackTarget(m_playSource); - - if (!m_playTarget) { - emit hideSplash(); - - if (factory->isAutoCallbackTarget(targetName)) { - QMessageBox::warning - (this, tr("Couldn't open audio device"), - tr("<b>No audio available</b><p>Could not open an audio device for playback.<p>Automatic audio device detection failed. Audio playback will not be available during this session.</p>"), - QMessageBox::Ok); - } else { - QMessageBox::warning - (this, tr("Couldn't open audio device"), - tr("<b>No audio available</b><p>Failed to open your preferred audio device (\"%1\").<p>Audio playback will not be available during this session.</p>") - .arg(factory->getCallbackTargetDescription(targetName)), - QMessageBox::Ok); + if (implementation == "auto") { + implementation = ""; + } + + breakfastquay::AudioFactory::Preference preference; + preference.implementation = implementation.toStdString(); + preference.recordDevice = recordDevice.toStdString(); + preference.playbackDevice = playbackDevice.toStdString(); + + SVCERR << "createAudioIO: Preferred implementation = \"" + << preference.implementation << "\"" << endl; + SVCERR << "createAudioIO: Preferred playback device = \"" + << preference.playbackDevice << "\"" << endl; + SVCERR << "createAudioIO: Preferred record device = \"" + << preference.recordDevice << "\"" << endl; + + if (!m_resamplerWrapper) { + m_resamplerWrapper = new breakfastquay::ResamplerWrapper(m_playSource); + m_playSource->setResamplerWrapper(m_resamplerWrapper); + } + + std::string errorString; + + if (m_soundOptions & WithAudioInput) { + m_audioIO = breakfastquay::AudioFactory:: + createCallbackIO(m_recordTarget, m_resamplerWrapper, + preference, errorString); + if (m_audioIO) { + m_audioIO->suspend(); // start in suspended state + m_playSource->setSystemPlaybackTarget(m_audioIO); + } + } else { + m_playTarget = breakfastquay::AudioFactory:: + createCallbackPlayTarget(m_resamplerWrapper, + preference, errorString); + if (m_playTarget) { + m_playTarget->suspend(); // start in suspended state + m_playSource->setSystemPlaybackTarget(m_playTarget); } } + + if (!m_playTarget && !m_audioIO) { + emit hideSplash(); + QString message; + QString error = errorString.c_str(); + QString firstBit, secondBit; + if (implementation == "") { + if (error == "") { + firstBit = tr("<b>No audio available</b><p>Could not open an audio device.</p>"); + } else { + firstBit = tr("<b>No audio available</b><p>Could not open audio device: %1</p>").arg(error); + } + if (m_soundOptions & WithAudioInput) { + secondBit = tr("<p>Automatic audio device detection failed. Audio playback and recording will not be available during this session.</p>"); + } else { + secondBit = tr("<p>Automatic audio device detection failed. 
Audio playback will not be available during this session.</p>"); + } + } else { + QString driverName = breakfastquay::AudioFactory:: + getImplementationDescription(implementation.toStdString()) + .c_str(); + if (error == "") { + firstBit = tr("<b>No audio available</b><p>Failed to open your preferred audio driver (\"%1\").</p>").arg(driverName); + } else { + firstBit = tr("<b>No audio available</b><p>Failed to open your preferred audio driver (\"%1\"): %2.</p>").arg(driverName).arg(error); + } + if (m_soundOptions & WithAudioInput) { + secondBit = tr("<p>Audio playback and recording will not be available during this session.</p>"); + } else { + secondBit = tr("<p>Audio playback will not be available during this session.</p>"); + } + } + SVDEBUG << "createAudioIO: ERROR: Failed to open audio device \"" + << implementation << "\": error is: " << error << endl; + QMessageBox::warning(this, tr("Couldn't open audio device"), + firstBit + secondBit, QMessageBox::Ok); + } +} + +void +MainWindowBase::deleteAudioIO() +{ + // First prevent this trying to call target. + if (m_playSource) { + m_playSource->setSystemPlaybackTarget(0); + m_playSource->setResamplerWrapper(0); + } + + // Then delete the breakfastquay::System object. + // Only one of these two exists! + delete m_audioIO; + delete m_playTarget; + + // And the breakfastquay resampler wrapper. We need to + // delete/recreate this if the channel count changes, which is one + // of the use cases for recreateAudioIO() calling this + delete m_resamplerWrapper; + + m_audioIO = 0; + m_playTarget = 0; + m_resamplerWrapper = 0; +} + +void +MainWindowBase::recreateAudioIO() +{ + deleteAudioIO(); + createAudioIO(); +} + +void +MainWindowBase::audioChannelCountIncreased(int) +{ + recreateAudioIO(); } WaveFileModel * @@ -2235,8 +2494,10 @@ this, SLOT(modelGenerationFailed(QString, QString))); connect(m_document, SIGNAL(modelRegenerationWarning(QString, QString, QString)), this, SLOT(modelRegenerationWarning(QString, QString, QString))); - connect(m_document, SIGNAL(alignmentFailed(QString, QString)), - this, SLOT(alignmentFailed(QString, QString))); + connect(m_document, SIGNAL(alignmentComplete(AlignmentModel *)), + this, SLOT(alignmentComplete(AlignmentModel *))); + connect(m_document, SIGNAL(alignmentFailed(QString)), + this, SLOT(alignmentFailed(QString))); emit replacedDocument(); } @@ -2483,6 +2744,26 @@ } void +MainWindowBase::findTimeRulerLayer() +{ + for (int i = 0; i < m_paneStack->getPaneCount(); ++i) { + Pane *pane = m_paneStack->getPane(i); + if (!pane) continue; + for (int j = 0; j < pane->getLayerCount(); ++j) { + Layer *layer = pane->getLayer(j); + if (!dynamic_cast<TimeRulerLayer *>(layer)) continue; + m_timeRulerLayer = layer; + return; + } + } + if (m_timeRulerLayer) { + SVCERR << "WARNING: Time ruler layer was not reset to 0 before session template loaded?" 
<< endl; + delete m_timeRulerLayer; + m_timeRulerLayer = 0; + } +} + +void MainWindowBase::toggleTimeRulers() { bool haveRulers = false; @@ -2620,15 +2901,164 @@ void MainWindowBase::play() { - if (m_playSource->isPlaying()) { + if ((m_recordTarget && m_recordTarget->isRecording()) || + (m_playSource && m_playSource->isPlaying())) { stop(); + QAction *action = qobject_cast<QAction *>(sender()); + if (action) action->setChecked(false); } else { + if (m_audioIO) m_audioIO->resume(); + else if (m_playTarget) m_playTarget->resume(); playbackFrameChanged(m_viewManager->getPlaybackFrame()); m_playSource->play(m_viewManager->getPlaybackFrame()); } } void +MainWindowBase::record() +{ + if (!(m_soundOptions & WithAudioInput)) { + return; + } + + if (!m_recordTarget) { + //!!! report + return; + } + + if (!m_audioIO) { + cerr << "MainWindowBase::record: about to create audio IO" << endl; + createAudioIO(); + } + + if (!m_audioIO) { + // don't need to report this, createAudioIO already should have + return; + } + + if (m_recordTarget->isRecording()) { + stop(); + return; + } + + QAction *action = qobject_cast<QAction *>(sender()); + + if (m_audioRecordMode == RecordReplaceSession) { + if (!checkSaveModified()) { + if (action) action->setChecked(false); + return; + } + } + + if (m_viewManager) m_viewManager->setGlobalCentreFrame(0); + + cerr << "MainWindowBase::record: about to resume" << endl; + m_audioIO->resume(); + + WritableWaveFileModel *model = m_recordTarget->startRecording(); + if (!model) { + cerr << "ERROR: MainWindowBase::record: Recording failed" << endl; + //!!! report + if (action) action->setChecked(false); + return; + } + + if (!model->isOK()) { + m_recordTarget->stopRecording(); + m_audioIO->suspend(); + delete model; + return; + } + + PlayParameterRepository::getInstance()->addPlayable(model); + + if (m_audioRecordMode == RecordReplaceSession || !getMainModel()) { + + //!!! duplication with openAudio here + + QString templateName = getDefaultSessionTemplate(); + bool loadedTemplate = false; + + if (templateName != "") { + FileOpenStatus tplStatus = openSessionTemplate(templateName); + if (tplStatus == FileOpenCancelled) { + m_recordTarget->stopRecording(); + m_audioIO->suspend(); + PlayParameterRepository::getInstance()->removePlayable(model); + return; + } + if (tplStatus != FileOpenFailed) { + loadedTemplate = true; + } + } + + if (!loadedTemplate) { + closeSession(); + createDocument(); + } + + Model *prevMain = getMainModel(); + if (prevMain) { + m_playSource->removeModel(prevMain); + PlayParameterRepository::getInstance()->removePlayable(prevMain); + } + + m_document->setMainModel(model); + setupMenus(); + findTimeRulerLayer(); + + if (loadedTemplate || (m_sessionFile == "")) { + //!!! 
shouldn't be dealing directly with title from here -- call a method + setWindowTitle(tr("%1: %2") + .arg(QApplication::applicationName()) + .arg(model->getLocation())); + CommandHistory::getInstance()->clear(); + CommandHistory::getInstance()->documentSaved(); + m_documentModified = false; + } else { + setWindowTitle(tr("%1: %2 [%3]") + .arg(QApplication::applicationName()) + .arg(QFileInfo(m_sessionFile).fileName()) + .arg(model->getLocation())); + if (m_documentModified) { + m_documentModified = false; + documentModified(); // so as to restore "(modified)" window title + } + } + + } else { + + CommandHistory::getInstance()->startCompoundOperation + (tr("Import Recorded Audio"), true); + + m_document->addImportedModel(model); + + AddPaneCommand *command = new AddPaneCommand(this); + CommandHistory::getInstance()->addCommand(command); + + Pane *pane = command->getPane(); + + if (m_timeRulerLayer) { + m_document->addLayerToView(pane, m_timeRulerLayer); + } + + Layer *newLayer = m_document->createImportedLayer(model); + + if (newLayer) { + m_document->addLayerToView(pane, newLayer); + } + + CommandHistory::getInstance()->endCompoundOperation(); + } + + updateMenuStates(); + m_recentFiles.addFile(model->getLocation()); + currentPaneChanged(m_paneStack->getCurrentPane()); + + emit audioFileLoaded(); +} + +void MainWindowBase::ffwd() { if (!getMainModel()) return; @@ -2856,8 +3286,18 @@ void MainWindowBase::stop() { + if (m_recordTarget && + m_recordTarget->isRecording()) { + m_recordTarget->stopRecording(); + } + + if (!m_playSource) return; + m_playSource->stop(); + if (m_audioIO) m_audioIO->suspend(); + else if (m_playTarget) m_playTarget->suspend(); + if (m_paneStack && m_paneStack->getCurrentPane()) { updateVisibleRangeDisplay(m_paneStack->getCurrentPane()); } else { @@ -3196,6 +3636,17 @@ } void +MainWindowBase::recordDurationChanged(sv_frame_t frame, sv_samplerate_t rate) +{ + RealTime duration = RealTime::frame2RealTime(frame, rate); + QString durStr = duration.toSecText().c_str(); + + m_myStatusMessage = tr("Recording: %1").arg(durStr); + + getStatusLabel()->setText(m_myStatusMessage); +} + +void MainWindowBase::globalCentreFrameChanged(sv_frame_t ) { if ((m_playSource && m_playSource->isPlaying()) || !getMainModel()) return; @@ -3326,8 +3777,9 @@ // SVDEBUG << "MainWindowBase::mainModelChanged(" << model << ")" << endl; updateDescriptionLabel(); if (model) m_viewManager->setMainModelSampleRate(model->getSampleRate()); - if (model && !m_playTarget && m_audioOutput) { - createPlayTarget(); + if (model && !(m_playTarget || m_audioIO) && + (m_soundOptions & WithAudioOutput)) { + createAudioIO(); } } @@ -3339,7 +3791,6 @@ m_viewManager->setPlaybackModel(0); } m_playSource->removeModel(model); - FFTDataServer::modelAboutToBeDeleted(model); } void @@ -3379,6 +3830,12 @@ } void +MainWindowBase::alignmentComplete(AlignmentModel *model) +{ + cerr << "MainWindowBase::alignmentComplete(" << model << ")" << endl; +} + +void MainWindowBase::pollOSC() { if (!m_oscQueue || m_oscQueue->isEmpty()) return; @@ -3457,4 +3914,30 @@ #endif } - +void +MainWindowBase::openLocalFolder(QString path) +{ + QDir d(path); + if (d.exists()) { + QStringList args; + QString path = d.canonicalPath(); +#if defined Q_OS_WIN32 + // Although the Win32 API is quite happy to have + // forward slashes as directory separators, Windows + // Explorer is not + path = path.replace('/', '\\'); + args << path; + QProcess::execute("c:/windows/explorer.exe", args); +#else + args << path; + QProcess::execute( +#if defined Q_OS_MAC + 
"/usr/bin/open", +#else + "/usr/bin/xdg-open", +#endif + args); +#endif + } +} +
--- a/framework/MainWindowBase.h Sat Jan 30 12:05:14 2016 +0000 +++ b/framework/MainWindowBase.h Fri Jan 13 10:29:55 2017 +0000 @@ -13,8 +13,8 @@ COPYING included with this distribution for more information. */ -#ifndef _MAIN_WINDOW_BASE_H_ -#define _MAIN_WINDOW_BASE_H_ +#ifndef SV_MAIN_WINDOW_BASE_H +#define SV_MAIN_WINDOW_BASE_H #include <QFrame> #include <QString> @@ -46,10 +46,12 @@ class WaveformLayer; class WaveFileModel; class AudioCallbackPlaySource; -class AudioCallbackPlayTarget; +class AudioCallbackRecordTarget; class CommandHistory; class QMenu; class AudioDial; +class LevelPanWidget; +class LevelPanToolButton; class QLabel; class QCheckBox; class PreferencesDialog; @@ -62,6 +64,13 @@ class ModelDataTableDialog; class QSignalMapper; class QShortcut; +class AlignmentModel; + +namespace breakfastquay { + class SystemPlaybackTarget; + class SystemAudioIO; + class ResamplerWrapper; +} /** * The base class for the SV main window. This includes everything to @@ -77,7 +86,16 @@ Q_OBJECT public: - MainWindowBase(bool withAudioOutput, bool withMIDIInput); + enum SoundOption { + WithAudioOutput = 0x01, + WithAudioInput = 0x02, + WithMIDIInput = 0x04, + WithEverything = 0xff, + WithNothing = 0x00 + }; + typedef int SoundOptions; + + MainWindowBase(SoundOptions options = WithEverything); virtual ~MainWindowBase(); enum AudioFileOpenMode { @@ -95,6 +113,11 @@ FileOpenWrongMode // attempted to open layer when no main model present }; + enum AudioRecordMode { + RecordReplaceSession, + RecordCreateAdditionalModel + }; + virtual FileOpenStatus open(FileSource source, AudioFileOpenMode = AskUser); virtual FileOpenStatus openPath(QString fileOrUrl, AudioFileOpenMode = AskUser); virtual FileOpenStatus openAudio(FileSource source, AudioFileOpenMode = AskUser, QString templateName = ""); @@ -102,6 +125,8 @@ virtual FileOpenStatus openLayer(FileSource source); virtual FileOpenStatus openImage(FileSource source); + virtual FileOpenStatus openDirOfAudio(QString dirPath); + virtual FileOpenStatus openSession(FileSource source); virtual FileOpenStatus openSessionPath(QString fileOrUrl); virtual FileOpenStatus openSessionTemplate(QString templateName); @@ -117,6 +142,10 @@ m_defaultFfwdRwdStep = step; } + void setAudioRecordMode(AudioRecordMode mode) { + m_audioRecordMode = mode; + } + signals: // Used to toggle the availability of menu actions void canAddPane(bool); @@ -142,10 +171,13 @@ void canInsertInstantsAtBoundaries(bool); void canInsertItemAtSelection(bool); void canRenumberInstants(bool); + void canSubdivideInstants(bool); + void canWinnowInstants(bool); void canDeleteCurrentLayer(bool); void canZoom(bool); void canScroll(bool); void canPlay(bool); + void canRecord(bool); void canFfwd(bool); void canRewind(bool); void canPlaySelection(bool); @@ -168,6 +200,7 @@ public slots: virtual void preferenceChanged(PropertyContainer::PropertyName); virtual void resizeConstrained(QSize); + virtual void recreateAudioIO(); protected slots: virtual void zoomIn(); @@ -196,6 +229,7 @@ virtual void ffwdEnd(); virtual void rewind(); virtual void rewindStart(); + virtual void record(); virtual void stop(); virtual void ffwdSimilar(); @@ -214,6 +248,8 @@ virtual void playSelectionToggled(); virtual void playSoloToggled(); + virtual void audioChannelCountIncreased(int count); + virtual void sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool) = 0; virtual void audioOverloadPluginDisabled() = 0; virtual void audioTimeStretchMultiChannelDisabled() = 0; @@ -222,7 +258,8 @@ virtual void 
globalCentreFrameChanged(sv_frame_t); virtual void viewCentreFrameChanged(View *, sv_frame_t); virtual void viewZoomLevelChanged(View *, int, bool); - virtual void outputLevelsChanged(float, float) = 0; + virtual void monitoringLevelsChanged(float, float) = 0; + virtual void recordDurationChanged(sv_frame_t, sv_samplerate_t); virtual void currentPaneChanged(Pane *); virtual void currentLayerChanged(Pane *, Layer *); @@ -246,6 +283,8 @@ virtual void insertItemAtSelection(); virtual void insertItemAt(sv_frame_t, sv_frame_t); virtual void renumberInstants(); + virtual void subdivideInstantsBy(int); + virtual void winnowInstantsBy(int); virtual void documentModified(); virtual void documentRestored(); @@ -266,7 +305,9 @@ virtual void modelGenerationWarning(QString, QString) = 0; virtual void modelRegenerationFailed(QString, QString, QString) = 0; virtual void modelRegenerationWarning(QString, QString, QString) = 0; - virtual void alignmentFailed(QString, QString) = 0; + + virtual void alignmentComplete(AlignmentModel *); + virtual void alignmentFailed(QString) = 0; virtual void rightButtonMenuRequested(Pane *, QPoint point) = 0; @@ -304,9 +345,13 @@ ViewManager *m_viewManager; Layer *m_timeRulerLayer; - bool m_audioOutput; + SoundOptions m_soundOptions; + AudioCallbackPlaySource *m_playSource; - AudioCallbackPlayTarget *m_playTarget; + AudioCallbackRecordTarget *m_recordTarget; + breakfastquay::ResamplerWrapper *m_resamplerWrapper; + breakfastquay::SystemPlaybackTarget *m_playTarget; // only one of this... + breakfastquay::SystemAudioIO *m_audioIO; // ... and this exists class OSCQueueStarter : public QThread { @@ -342,6 +387,8 @@ RealTime m_defaultFfwdRwdStep; + AudioRecordMode m_audioRecordMode; + mutable QLabel *m_statusLabel; QLabel *getStatusLabel() const; @@ -421,18 +468,27 @@ virtual QString getDefaultSessionTemplate() const; virtual void setDefaultSessionTemplate(QString); - virtual void createPlayTarget(); + virtual void findTimeRulerLayer(); + + virtual void createAudioIO(); + virtual void deleteAudioIO(); + virtual void openHelpUrl(QString url); + virtual void openLocalFolder(QString path); virtual void setupMenus() = 0; virtual void updateVisibleRangeDisplay(Pane *p) const = 0; virtual void updatePositionStatusDisplays() const = 0; // Call this after setting up the menu bar, to fix up single-key - // shortcuts on OS/X + // shortcuts on OS/X and do any other platform-specific tidying virtual void finaliseMenus(); virtual void finaliseMenu(QMenu *); + // Call before finaliseMenus if you wish to have a say in this question + void setIconsVisibleInMenus(bool visible) { m_iconsVisibleInMenus = visible; } + bool m_iconsVisibleInMenus; + // Only used on OS/X to work around a Qt/Cocoa bug, see finaliseMenus QSignalMapper *m_menuShortcutMapper; QList<QShortcut *> m_appShortcuts;
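The constructor change above replaces the old pair of bools (withAudioOutput, withMIDIInput) with a SoundOptions bitmask, so a subclass now states its audio capabilities explicitly and can also choose a recording mode. A sketch of such a subclass follows; MyMainWindow is an illustrative name, and the pure-virtual handlers this header requires (setupMenus(), monitoringLevelsChanged(), alignmentFailed(QString), sampleRateMismatch(...) and so on) are omitted here.

// Sketch of a MainWindowBase subclass under the new SoundOptions scheme.
// "MyMainWindow" is an illustrative name only.

#include "MainWindowBase.h"

class MyMainWindow : public MainWindowBase
{
    Q_OBJECT

public:
    MyMainWindow() :
        MainWindowBase(WithAudioOutput | WithAudioInput | WithMIDIInput)
    {
        // WithAudioInput is only useful alongside WithAudioOutput; the
        // base constructor warns and disables recording otherwise.
        // Recorded audio becomes an additional model in the current
        // session rather than replacing it:
        setAudioRecordMode(RecordCreateAdditionalModel);
    }
};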
--- a/framework/SVFileReader.cpp Sat Jan 30 12:05:14 2016 +0000 +++ b/framework/SVFileReader.cpp Fri Jan 13 10:29:55 2017 +0000 @@ -26,7 +26,7 @@ #include "data/fileio/FileFinder.h" -#include "data/model/WaveFileModel.h" +#include "data/model/ReadOnlyWaveFileModel.h" #include "data/model/EditableDenseThreeDimensionalModel.h" #include "data/model/SparseOneDimensionalModel.h" #include "data/model/SparseTimeValueModel.h" @@ -393,14 +393,12 @@ } bool -SVFileReader::readWindow(const QXmlAttributes &attributes) +SVFileReader::readWindow(const QXmlAttributes &) { - bool ok = false; - - READ_MANDATORY(int, width, toInt); - READ_MANDATORY(int, height, toInt); - - m_paneCallback.setWindowSize(width, height); + // The window element contains window dimensions, which we used to + // read and size the window accordingly. This was a Bad Idea [tm] + // and we now do nothing instead. See #1769 Loading window + // dimensions from session file is a really bad idea return true; } @@ -489,7 +487,7 @@ if (mm) rate = mm->getSampleRate(); } - model = new WaveFileModel(file, rate); + model = new ReadOnlyWaveFileModel(file, rate); if (!model->isOK()) { delete model; model = 0; @@ -1166,7 +1164,7 @@ for (QStringList::iterator i = data.begin(); i != data.end(); ++i) { - if (values.size() == (int)dtdm->getHeight()) { + if (int(values.size()) == dtdm->getHeight()) { if (!warned) { cerr << "WARNING: SV-XML: Too many y-bins in 3-D dataset row " << m_rowNumber << endl;
--- a/framework/TransformUserConfigurator.cpp	Sat Jan 30 12:05:14 2016 +0000
+++ b/framework/TransformUserConfigurator.cpp	Fri Jan 13 10:29:55 2017 +0000
@@ -45,12 +45,12 @@
 {
     if (plugin && plugin->getType() == "Feature Extraction Plugin") {
         Vamp::Plugin *vp = static_cast<Vamp::Plugin *>(plugin);
-        SVDEBUG << "TransformUserConfigurator::getChannelRange: is a VP" << endl;
+        SVDEBUG << "TransformUserConfigurator::getChannelRange: is a Vamp plugin" << endl;
         minChannels = int(vp->getMinChannelCount());
         maxChannels = int(vp->getMaxChannelCount());
         return true;
     } else {
-        SVDEBUG << "TransformUserConfigurator::getChannelRange: is not a VP" << endl;
+        SVDEBUG << "TransformUserConfigurator::getChannelRange: is not a Vamp plugin" << endl;
         return TransformFactory::getInstance()->
             getTransformChannelRange(identifier, minChannels, maxChannels);
     }
@@ -80,28 +80,9 @@
 
     if (!plugin) return false;
 
-    if (FeatureExtractionPluginFactory::instanceFor(id)) {
-
-        Vamp::Plugin *vp = static_cast<Vamp::Plugin *>(plugin);
-
-        frequency = (vp->getInputDomain() == Vamp::Plugin::FrequencyDomain);
-
-        std::vector<Vamp::Plugin::OutputDescriptor> od =
-            vp->getOutputDescriptors();
-
-        cerr << "configure: looking for output: " << output << endl;
-
-        if (od.size() > 1) {
-            for (size_t i = 0; i < od.size(); ++i) {
-                if (od[i].identifier == output.toStdString()) {
-                    outputLabel = od[i].name.c_str();
-                    outputDescription = od[i].description.c_str();
-                    break;
-                }
-            }
-        }
-
-    } else if (RealTimePluginFactory::instanceFor(id)) {
+    SVDEBUG << "TransformUserConfigurator::configure: identifier " << id << endl;
+
+    if (RealTimePluginFactory::instanceFor(id)) {
 
         RealTimePluginFactory *factory = RealTimePluginFactory::instanceFor(id);
         const RealTimePluginDescriptor *desc = factory->getPluginDescriptor(id);
@@ -130,8 +111,29 @@
             SVDEBUG << "Setting auditioning effect" << endl;
             source->setAuditioningEffect(rtp);
         }
+
+    } else {
+
+        Vamp::Plugin *vp = static_cast<Vamp::Plugin *>(plugin);
+
+        frequency = (vp->getInputDomain() == Vamp::Plugin::FrequencyDomain);
+
+        std::vector<Vamp::Plugin::OutputDescriptor> od =
+            vp->getOutputDescriptors();
+
+//        cerr << "configure: looking for output: " << output << endl;
+
+        if (od.size() > 1) {
+            for (size_t i = 0; i < od.size(); ++i) {
+                if (od[i].identifier == output.toStdString()) {
+                    outputLabel = od[i].name.c_str();
+                    outputDescription = od[i].description.c_str();
+                    break;
+                }
+            }
+        }
     }
-    
+
     int sourceChannels = 1;
     if (dynamic_cast<DenseTimeValueModel *>(inputModel)) {
         sourceChannels = dynamic_cast<DenseTimeValueModel *>(inputModel)
@@ -182,12 +184,12 @@
     if (selectedInput != "") {
         if (modelMap.contains(selectedInput)) {
            inputModel = modelMap.value(selectedInput);
-            cerr << "Found selected input \"" << selectedInput << "\" in model map, result is " << inputModel << endl;
+            SVDEBUG << "Found selected input \"" << selectedInput << "\" in model map, result is " << inputModel << endl;
         } else {
-            cerr << "Failed to find selected input \"" << selectedInput << "\" in model map" << endl;
+            SVDEBUG << "Failed to find selected input \"" << selectedInput << "\" in model map" << endl;
         }
     } else {
-        cerr << "Selected input empty: \"" << selectedInput << "\"" << endl;
+        SVDEBUG << "Selected input empty: \"" << selectedInput << "\"" << endl;
     }
 
     // Write parameters back to transform object
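The restructured configure() above now deals with real-time (effect) plugins first and treats anything else as a Vamp feature-extraction plugin, noting whether it wants frequency-domain input and searching its output descriptors for the one whose identifier matches the requested output. That lookup can be written against the Vamp host SDK on its own; the sketch below is a standalone illustration in which the OutputInfo struct and describeOutput() helper are hypothetical names introduced only to isolate the logic.

// Standalone sketch of the output-descriptor lookup; OutputInfo and
// describeOutput() are illustrative, not part of the configurator.
#include <vamp-hostsdk/Plugin.h>

#include <string>
#include <vector>

struct OutputInfo {
    bool frequencyDomain = false;
    std::string label;
    std::string description;
};

static OutputInfo
describeOutput(Vamp::Plugin *vp, const std::string &outputId)
{
    OutputInfo info;

    // Does the plugin expect frequency-domain rather than time-domain input?
    info.frequencyDomain =
        (vp->getInputDomain() == Vamp::Plugin::FrequencyDomain);

    std::vector<Vamp::Plugin::OutputDescriptor> od = vp->getOutputDescriptors();

    // As in the configurator, only search when there is more than one
    // output to choose between.
    if (od.size() > 1) {
        for (size_t i = 0; i < od.size(); ++i) {
            if (od[i].identifier == outputId) {
                info.label = od[i].name;
                info.description = od[i].description;
                break;
            }
        }
    }
    return info;
}

In the configurator itself the label and description found this way are later handed to the plugin's parameter dialog; the sketch simply returns them to the caller.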
--- a/svapp.pro	Sat Jan 30 12:05:14 2016 +0000
+++ b/svapp.pro	Fri Jan 13 10:29:55 2017 +0000
@@ -1,76 +1,24 @@
 TEMPLATE = lib
 
+INCLUDEPATH += ../vamp-plugin-sdk
+
 exists(config.pri) {
     include(config.pri)
 }
 
-!exists(config.pri) {
-
-    CONFIG += release
-    DEFINES += NDEBUG BUILD_RELEASE NO_TIMING
-
-    win32-g++ {
-        INCLUDEPATH += ../sv-dependency-builds/win32-mingw/include
-        LIBS += -L../sv-dependency-builds/win32-mingw/lib
-    }
-    win32-msvc* {
-        INCLUDEPATH += ../sv-dependency-builds/win32-msvc/include
-        LIBS += -L../sv-dependency-builds/win32-msvc/lib
-    }
-    macx* {
-        INCLUDEPATH += ../sv-dependency-builds/osx/include
-        LIBS += -L../sv-dependency-builds/osx/lib
-    }
-
-    win* {
-        DEFINES += HAVE_PORTAUDIO_2_0
-    }
-    macx* {
-        DEFINES += HAVE_COREAUDIO HAVE_PORTAUDIO_2_0
-    }
-}
 
 CONFIG += staticlib qt thread warn_on stl rtti exceptions c++11
 QT += network xml gui widgets
 
 TARGET = svapp
 
-DEPENDPATH += . ../svcore ../svgui
-INCLUDEPATH += . ../svcore ../svgui
+DEPENDPATH += . ../bqaudioio ../svcore ../svgui ../piper-cpp
+INCLUDEPATH += . ../bqaudioio ../svcore ../svgui ../piper-cpp
 OBJECTS_DIR = o
 MOC_DIR = o
 
-HEADERS += audioio/AudioCallbackPlaySource.h \
-           audioio/AudioCallbackPlayTarget.h \
-           audioio/AudioGenerator.h \
-           audioio/AudioJACKTarget.h \
-           audioio/AudioPortAudioTarget.h \
-           audioio/AudioPulseAudioTarget.h \
-           audioio/AudioTargetFactory.h \
-           audioio/ClipMixer.h \
-           audioio/ContinuousSynth.h \
-           audioio/PlaySpeedRangeMapper.h
+include(files.pri)
 
-SOURCES += audioio/AudioCallbackPlaySource.cpp \
-           audioio/AudioCallbackPlayTarget.cpp \
-           audioio/AudioGenerator.cpp \
-           audioio/AudioJACKTarget.cpp \
-           audioio/AudioPortAudioTarget.cpp \
-           audioio/AudioPulseAudioTarget.cpp \
-           audioio/AudioTargetFactory.cpp \
-           audioio/ClipMixer.cpp \
-           audioio/ContinuousSynth.cpp \
-           audioio/PlaySpeedRangeMapper.cpp
+HEADERS = $$(SVAPP_HEADERS)
+SOURCES = $$(SVAPP_SOURCES)
 
-HEADERS += framework/Document.h \
-           framework/MainWindowBase.h \
-           framework/SVFileReader.h \
-           framework/TransformUserConfigurator.h \
-           framework/VersionTester.h
-
-SOURCES += framework/Document.cpp \
-           framework/MainWindowBase.cpp \
-           framework/SVFileReader.cpp \
-           framework/TransformUserConfigurator.cpp \
-           framework/VersionTester.cpp
-