changeset 468:56acd9368532 bqaudioio

Initial work toward switching to bqaudioio library (so as to get I/O, not just output)
author Chris Cannam
date Tue, 04 Aug 2015 13:27:42 +0100
parents 45054b36ddbf
children 4480b031fe38 c6094bca34f4
files audio/AudioCallbackPlaySource.cpp audio/AudioCallbackPlaySource.h audio/AudioGenerator.cpp audio/AudioGenerator.h audio/ClipMixer.cpp audio/ClipMixer.h audio/ContinuousSynth.cpp audio/ContinuousSynth.h audio/PlaySpeedRangeMapper.cpp audio/PlaySpeedRangeMapper.h audioio/AudioCallbackPlaySource.cpp audioio/AudioCallbackPlaySource.h audioio/AudioCallbackPlayTarget.cpp audioio/AudioCallbackPlayTarget.h audioio/AudioGenerator.cpp audioio/AudioGenerator.h audioio/AudioJACKTarget.cpp audioio/AudioJACKTarget.h audioio/AudioPortAudioTarget.cpp audioio/AudioPortAudioTarget.h audioio/AudioPulseAudioTarget.cpp audioio/AudioPulseAudioTarget.h audioio/AudioTargetFactory.cpp audioio/AudioTargetFactory.h audioio/ClipMixer.cpp audioio/ClipMixer.h audioio/ContinuousSynth.cpp audioio/ContinuousSynth.h audioio/PlaySpeedRangeMapper.cpp audioio/PlaySpeedRangeMapper.h configure.ac framework/MainWindowBase.cpp framework/MainWindowBase.h svapp.pro
diffstat 34 files changed, 3929 insertions(+), 5644 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/AudioCallbackPlaySource.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,1902 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioCallbackPlaySource.h"
+
+#include "AudioGenerator.h"
+
+#include "data/model/Model.h"
+#include "base/ViewManagerBase.h"
+#include "base/PlayParameterRepository.h"
+#include "base/Preferences.h"
+#include "data/model/DenseTimeValueModel.h"
+#include "data/model/WaveFileModel.h"
+#include "data/model/SparseOneDimensionalModel.h"
+#include "plugin/RealTimePluginInstance.h"
+
+#include "bqaudioio/SystemPlaybackTarget.h"
+
+#include <rubberband/RubberBandStretcher.h>
+using namespace RubberBand;
+
+#include <iostream>
+#include <cassert>
+
+//#define DEBUG_AUDIO_PLAY_SOURCE 1
+//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1
+
+static const int DEFAULT_RING_BUFFER_SIZE = 131071;
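+// (131071 frames is a little under three seconds per channel at 44.1kHz;
+// setSystemPlaybackBlockSize may enlarge the ring buffers further if the
+// callback block size demands it.)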
+
+AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager,
+                                                 QString clientName) :
+    m_viewManager(manager),
+    m_audioGenerator(new AudioGenerator()),
+    m_clientName(clientName.toUtf8().data()),
+    m_readBuffers(0),
+    m_writeBuffers(0),
+    m_readBufferFill(0),
+    m_writeBufferFill(0),
+    m_bufferScavenger(1),
+    m_sourceChannelCount(0),
+    m_blockSize(1024),
+    m_sourceSampleRate(0),
+    m_targetSampleRate(0),
+    m_playLatency(0),
+    m_target(0),
+    m_lastRetrievalTimestamp(0.0),
+    m_lastRetrievedBlockSize(0),
+    m_trustworthyTimestamps(true),
+    m_lastCurrentFrame(0),
+    m_playing(false),
+    m_exiting(false),
+    m_lastModelEndFrame(0),
+    m_ringBufferSize(DEFAULT_RING_BUFFER_SIZE),
+    m_outputLeft(0.0),
+    m_outputRight(0.0),
+    m_auditioningPlugin(0),
+    m_auditioningPluginBypassed(false),
+    m_playStartFrame(0),
+    m_playStartFramePassed(false),
+    m_timeStretcher(0),
+    m_monoStretcher(0),
+    m_stretchRatio(1.0),
+    m_stretchMono(false),
+    m_stretcherInputCount(0),
+    m_stretcherInputs(0),
+    m_stretcherInputSizes(0),
+    m_fillThread(0),
+    m_converter(0),
+    m_crapConverter(0),
+    m_resampleQuality(Preferences::getInstance()->getResampleQuality())
+{
+    m_viewManager->setAudioPlaySource(this);
+
+    connect(m_viewManager, SIGNAL(selectionChanged()),
+	    this, SLOT(selectionChanged()));
+    connect(m_viewManager, SIGNAL(playLoopModeChanged()),
+	    this, SLOT(playLoopModeChanged()));
+    connect(m_viewManager, SIGNAL(playSelectionModeChanged()),
+	    this, SLOT(playSelectionModeChanged()));
+
+    connect(this, SIGNAL(playStatusChanged(bool)),
+            m_viewManager, SLOT(playStatusChanged(bool)));
+
+    connect(PlayParameterRepository::getInstance(),
+	    SIGNAL(playParametersChanged(PlayParameters *)),
+	    this, SLOT(playParametersChanged(PlayParameters *)));
+
+    connect(Preferences::getInstance(),
+            SIGNAL(propertyChanged(PropertyContainer::PropertyName)),
+            this, SLOT(preferenceChanged(PropertyContainer::PropertyName)));
+}
+
+AudioCallbackPlaySource::~AudioCallbackPlaySource()
+{
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource entering" << endl;
+#endif
+    m_exiting = true;
+
+    if (m_fillThread) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource dtor: awakening thread" << endl;
+#endif
+        m_condition.wakeAll();
+	m_fillThread->wait();
+	delete m_fillThread;
+    }
+
+    clearModels();
+    
+    if (m_readBuffers != m_writeBuffers) {
+	delete m_readBuffers;
+    }
+
+    delete m_writeBuffers;
+
+    delete m_audioGenerator;
+
+    for (int i = 0; i < m_stretcherInputCount; ++i) {
+        delete[] m_stretcherInputs[i];
+    }
+    delete[] m_stretcherInputSizes;
+    delete[] m_stretcherInputs;
+
+    delete m_timeStretcher;
+    delete m_monoStretcher;
+
+    m_bufferScavenger.scavenge(true);
+    m_pluginScavenger.scavenge(true);
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource finishing" << endl;
+#endif
+}
+
+void
+AudioCallbackPlaySource::addModel(Model *model)
+{
+    if (m_models.find(model) != m_models.end()) return;
+
+    bool willPlay = m_audioGenerator->addModel(model);
+
+    m_mutex.lock();
+
+    m_models.insert(model);
+    if (model->getEndFrame() > m_lastModelEndFrame) {
+	m_lastModelEndFrame = model->getEndFrame();
+    }
+
+    bool buffersChanged = false, srChanged = false;
+
+    int modelChannels = 1;
+    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
+    if (dtvm) modelChannels = dtvm->getChannelCount();
+    if (modelChannels > m_sourceChannelCount) {
+	m_sourceChannelCount = modelChannels;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl;
+#endif
+
+    if (m_sourceSampleRate == 0) {
+
+	m_sourceSampleRate = model->getSampleRate();
+	srChanged = true;
+
+    } else if (model->getSampleRate() != m_sourceSampleRate) {
+
+        // If this is a dense time-value model and we have no other, we
+        // can just switch to this model's sample rate
+
+        if (dtvm) {
+
+            bool conflicting = false;
+
+            for (std::set<Model *>::const_iterator i = m_models.begin();
+                 i != m_models.end(); ++i) {
+                // Only wave file models can be considered conflicting --
+                // writable wave file models are derived and we shouldn't
+                // take their rates into account.  Also, don't give any
+                // particular weight to a file that's already playing at
+                // the wrong rate anyway
+                WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i);
+                if (wfm && wfm != dtvm &&
+                    wfm->getSampleRate() != model->getSampleRate() &&
+                    wfm->getSampleRate() == m_sourceSampleRate) {
+                    SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl;
+                    conflicting = true;
+                    break;
+                }
+            }
+
+            if (conflicting) {
+
+                SVDEBUG << "AudioCallbackPlaySource::addModel: ERROR: "
+                          << "New model sample rate does not match" << endl
+                          << "existing model(s) (new " << model->getSampleRate()
+                          << " vs " << m_sourceSampleRate
+                          << "), playback will be wrong"
+                          << endl;
+                
+                emit sampleRateMismatch(model->getSampleRate(),
+                                        m_sourceSampleRate,
+                                        false);
+            } else {
+                m_sourceSampleRate = model->getSampleRate();
+                srChanged = true;
+            }
+        }
+    }
+
+    if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) {
+	clearRingBuffers(true, getTargetChannelCount());
+	buffersChanged = true;
+    } else {
+	if (willPlay) clearRingBuffers(true);
+    }
+
+    if (buffersChanged || srChanged) {
+	if (m_converter) {
+	    src_delete(m_converter);
+            src_delete(m_crapConverter);
+	    m_converter = 0;
+            m_crapConverter = 0;
+	}
+    }
+
+    rebuildRangeLists();
+
+    m_mutex.unlock();
+
+    m_audioGenerator->setTargetChannelCount(getTargetChannelCount());
+
+    if (!m_fillThread) {
+	m_fillThread = new FillThread(*this);
+	m_fillThread->start();
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << endl;
+#endif
+
+    if (buffersChanged || srChanged) {
+	emit modelReplaced();
+    }
+
+    connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
+            this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl;
+#endif
+
+    m_condition.wakeAll();
+}
+
+void
+AudioCallbackPlaySource::modelChangedWithin(sv_frame_t 
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+                                            startFrame
+#endif
+                                            , sv_frame_t endFrame)
+{
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::modelChangedWithin(" << startFrame << "," << endFrame << ")" << endl;
+#endif
+    if (endFrame > m_lastModelEndFrame) {
+        m_lastModelEndFrame = endFrame;
+        rebuildRangeLists();
+    }
+}
+
+void
+AudioCallbackPlaySource::removeModel(Model *model)
+{
+    m_mutex.lock();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl;
+#endif
+
+    disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
+               this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
+
+    m_models.erase(model);
+
+    if (m_models.empty()) {
+	if (m_converter) {
+	    src_delete(m_converter);
+            src_delete(m_crapConverter);
+	    m_converter = 0;
+            m_crapConverter = 0;
+	}
+	m_sourceSampleRate = 0;
+    }
+
+    sv_frame_t lastEnd = 0;
+    for (std::set<Model *>::const_iterator i = m_models.begin();
+	 i != m_models.end(); ++i) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl;
+#endif
+	if ((*i)->getEndFrame() > lastEnd) {
+            lastEnd = (*i)->getEndFrame();
+        }
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	cout << "(done, lastEnd now " << lastEnd << ")" << endl;
+#endif
+    }
+    m_lastModelEndFrame = lastEnd;
+
+    m_audioGenerator->removeModel(model);
+
+    m_mutex.unlock();
+
+    clearRingBuffers();
+}
+
+void
+AudioCallbackPlaySource::clearModels()
+{
+    m_mutex.lock();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::clearModels()" << endl;
+#endif
+
+    m_models.clear();
+
+    if (m_converter) {
+	src_delete(m_converter);
+        src_delete(m_crapConverter);
+	m_converter = 0;
+        m_crapConverter = 0;
+    }
+
+    m_lastModelEndFrame = 0;
+
+    m_sourceSampleRate = 0;
+
+    m_mutex.unlock();
+
+    m_audioGenerator->clearModels();
+
+    clearRingBuffers();
+}    
+
+void
+AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count)
+{
+    if (!haveLock) m_mutex.lock();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << "clearRingBuffers" << endl;
+#endif
+
+    rebuildRangeLists();
+
+    if (count == 0) {
+	if (m_writeBuffers) count = int(m_writeBuffers->size());
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << "current playing frame = " << getCurrentPlayingFrame() << endl;
+
+    cerr << "write buffer fill (before) = " << m_writeBufferFill << endl;
+#endif
+    
+    m_writeBufferFill = getCurrentBufferedFrame();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << "current buffered frame = " << m_writeBufferFill << endl;
+#endif
+
+    if (m_readBuffers != m_writeBuffers) {
+	delete m_writeBuffers;
+    }
+
+    m_writeBuffers = new RingBufferVector;
+
+    for (int i = 0; i < count; ++i) {
+	m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize));
+    }
+
+    m_audioGenerator->reset();
+    
+//    cout << "AudioCallbackPlaySource::clearRingBuffers: Created "
+//	      << count << " write buffers" << endl;
+
+    if (!haveLock) {
+	m_mutex.unlock();
+    }
+}
+
+void
+AudioCallbackPlaySource::play(sv_frame_t startFrame)
+{
+    if (!m_sourceSampleRate) {
+        cerr << "AudioCallbackPlaySource::play: No source sample rate available, not playing" << endl;
+        return;
+    }
+    
+    if (m_viewManager->getPlaySelectionMode() &&
+	!m_viewManager->getSelections().empty()) {
+
+        SVDEBUG << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = ";
+
+        startFrame = m_viewManager->constrainFrameToSelection(startFrame);
+
+        SVDEBUG << startFrame << endl;
+
+    } else {
+        if (startFrame < 0) {
+            startFrame = 0;
+        }
+	if (startFrame >= m_lastModelEndFrame) {
+	    startFrame = 0;
+	}
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << "play(" << startFrame << ") -> playback model ";
+#endif
+
+    startFrame = m_viewManager->alignReferenceToPlaybackFrame(startFrame);
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << startFrame << endl;
+#endif
+
+    // The fill thread will automatically empty its buffers before
+    // starting again if we have not so far been playing, but not if
+    // we're just re-seeking.
+    // NO -- we can end up playing some first -- always reset here
+
+    m_mutex.lock();
+
+    if (m_timeStretcher) {
+        m_timeStretcher->reset();
+    }
+    if (m_monoStretcher) {
+        m_monoStretcher->reset();
+    }
+
+    m_readBufferFill = m_writeBufferFill = startFrame;
+    if (m_readBuffers) {
+        for (int c = 0; c < getTargetChannelCount(); ++c) {
+            RingBuffer<float> *rb = getReadRingBuffer(c);
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            cerr << "reset ring buffer for channel " << c << endl;
+#endif
+            if (rb) rb->reset();
+        }
+    }
+    if (m_converter) src_reset(m_converter);
+    if (m_crapConverter) src_reset(m_crapConverter);
+
+    m_mutex.unlock();
+
+    m_audioGenerator->reset();
+
+    m_playStartFrame = startFrame;
+    m_playStartFramePassed = false;
+    m_playStartedAt = RealTime::zeroTime;
+    if (m_target) {
+        m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime());
+    }
+
+    bool changed = !m_playing;
+    m_lastRetrievalTimestamp = 0;
+    m_lastCurrentFrame = 0;
+    m_playing = true;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::play: awakening thread" << endl;
+#endif
+
+    m_condition.wakeAll();
+    if (changed) {
+        emit playStatusChanged(m_playing);
+        emit activity(tr("Play from %1").arg
+                      (RealTime::frame2RealTime
+                       (m_playStartFrame, m_sourceSampleRate).toText().c_str()));
+    }
+}
+
+void
+AudioCallbackPlaySource::stop()
+{
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::stop()" << endl;
+#endif
+    bool changed = m_playing;
+    m_playing = false;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::stop: awakening thread" << endl;
+#endif
+
+    m_condition.wakeAll();
+    m_lastRetrievalTimestamp = 0;
+    if (changed) {
+        emit playStatusChanged(m_playing);
+        emit activity(tr("Stop at %1").arg
+                      (RealTime::frame2RealTime
+                       (m_lastCurrentFrame, m_sourceSampleRate).toText().c_str()));
+    }
+    m_lastCurrentFrame = 0;
+}
+
+void
+AudioCallbackPlaySource::selectionChanged()
+{
+    if (m_viewManager->getPlaySelectionMode()) {
+	clearRingBuffers();
+    }
+}
+
+void
+AudioCallbackPlaySource::playLoopModeChanged()
+{
+    clearRingBuffers();
+}
+
+void
+AudioCallbackPlaySource::playSelectionModeChanged()
+{
+    if (!m_viewManager->getSelections().empty()) {
+	clearRingBuffers();
+    }
+}
+
+void
+AudioCallbackPlaySource::playParametersChanged(PlayParameters *)
+{
+    clearRingBuffers();
+}
+
+void
+AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n)
+{
+    if (n == "Resample Quality") {
+        setResampleQuality(Preferences::getInstance()->getResampleQuality());
+    }
+}
+
+void
+AudioCallbackPlaySource::audioProcessingOverload()
+{
+    cerr << "Audio processing overload!" << endl;
+
+    if (!m_playing) return;
+
+    RealTimePluginInstance *ap = m_auditioningPlugin;
+    if (ap && !m_auditioningPluginBypassed) {
+        m_auditioningPluginBypassed = true;
+        emit audioOverloadPluginDisabled();
+        return;
+    }
+
+    if (m_timeStretcher &&
+        m_timeStretcher->getTimeRatio() < 1.0 &&
+        m_stretcherInputCount > 1 &&
+        m_monoStretcher && !m_stretchMono) {
+        m_stretchMono = true;
+        emit audioTimeStretchMultiChannelDisabled();
+        return;
+    }
+}
+
+void
+AudioCallbackPlaySource::setSystemPlaybackTarget(breakfastquay::SystemPlaybackTarget *target)
+{
+    m_target = target;
+}
+
+void
+AudioCallbackPlaySource::setSystemPlaybackBlockSize(int size)
+{
+    cout << "AudioCallbackPlaySource::setTarget: Block size -> " << size << endl;
+    if (size != 0) {
+        m_blockSize = size;
+    }
+    if (size * 4 > m_ringBufferSize) {
+        SVDEBUG << "AudioCallbackPlaySource::setTarget: Buffer size "
+                  << size << " > a quarter of ring buffer size "
+                  << m_ringBufferSize << ", calling for more ring buffer"
+                  << endl;
+        m_ringBufferSize = size * 4;
+        if (m_writeBuffers && !m_writeBuffers->empty()) {
+            clearRingBuffers();
+        }
+    }
+}
+
+int
+AudioCallbackPlaySource::getTargetBlockSize() const
+{
+//    cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl;
+    return int(m_blockSize);
+}
+
+void
+AudioCallbackPlaySource::setSystemPlaybackLatency(int latency)
+{
+    m_playLatency = latency;
+}
+
+sv_frame_t
+AudioCallbackPlaySource::getTargetPlayLatency() const
+{
+    return m_playLatency;
+}
+
+sv_frame_t
+AudioCallbackPlaySource::getCurrentPlayingFrame()
+{
+    // This method attempts to estimate which audio sample frame is
+    // "currently coming through the speakers".
+
+    sv_samplerate_t targetRate = getTargetSampleRate();
+    sv_frame_t latency = m_playLatency; // at target rate
+    RealTime latency_t = RealTime::zeroTime;
+
+    if (targetRate != 0) {
+        latency_t = RealTime::frame2RealTime(latency, targetRate);
+    }
+
+    return getCurrentFrame(latency_t);
+}
+
+sv_frame_t
+AudioCallbackPlaySource::getCurrentBufferedFrame()
+{
+    return getCurrentFrame(RealTime::zeroTime);
+}
+
+sv_frame_t
+AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t)
+{
+    // We resample when filling the ring buffer, and time-stretch when
+    // draining it.  The buffer contains data at the "target rate" and
+    // the latency provided by the target is also at the target rate.
+    // Because of the multiple rates involved, we do the actual
+    // calculation using RealTime instead.
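+    //
+    // As an illustration (rates assumed for the example only): with a
+    // 48kHz source and a 44.1kHz playback target, the ring buffers and
+    // the device latency are both measured in 44.1kHz frames, while
+    // m_readBufferFill and the frame we return are in 48kHz source
+    // frames; converting each quantity to RealTime lets them be added
+    // and subtracted directly.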
+
+    sv_samplerate_t sourceRate = getSourceSampleRate();
+    sv_samplerate_t targetRate = getTargetSampleRate();
+
+    if (sourceRate == 0 || targetRate == 0) return 0;
+
+    int inbuffer = 0; // at target rate
+
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *rb = getReadRingBuffer(c);
+	if (rb) {
+	    int here = rb->getReadSpace();
+	    if (c == 0 || here < inbuffer) inbuffer = here;
+	}
+    }
+
+    sv_frame_t readBufferFill = m_readBufferFill;
+    sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize;
+    double lastRetrievalTimestamp = m_lastRetrievalTimestamp;
+    double currentTime = 0.0;
+    if (m_target) currentTime = m_target->getCurrentTime();
+
+    bool looping = m_viewManager->getPlayLoopMode();
+
+    RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate);
+
+    sv_frame_t stretchlat = 0;
+    double timeRatio = 1.0;
+
+    if (m_timeStretcher) {
+        stretchlat = m_timeStretcher->getLatency();
+        timeRatio = m_timeStretcher->getTimeRatio();
+    }
+
+    RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, targetRate);
+
+    // When the target has just requested a block from us, the last
+    // sample it obtained was our buffer fill frame count minus the
+    // amount of read space (converted back to source sample rate)
+    // remaining now.  That sample is not expected to be played until
+    // the target's play latency has elapsed.  By the time the
+    // following block is requested, that sample will be at the
+    // target's play latency minus the last requested block size away
+    // from being played.
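+    //
+    // A rough worked example (all figures assumed): with a 512-frame
+    // callback block and 1024 frames of device latency at 44.1kHz,
+    // lastretrieved_t is ~11.6ms and latency_t is ~23.2ms, so the frame
+    // reported as "currently playing" trails the buffer-fill position by
+    // those amounts plus whatever is still queued in the ring buffers,
+    // less the time elapsed since the last retrieval.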
+
+    RealTime sincerequest_t = RealTime::zeroTime;
+    RealTime lastretrieved_t = RealTime::zeroTime;
+
+    if (m_target &&
+        m_trustworthyTimestamps &&
+        lastRetrievalTimestamp != 0.0) {
+
+        lastretrieved_t = RealTime::frame2RealTime
+            (lastRetrievedBlockSize, targetRate);
+
+        // calculate number of frames at target rate that have elapsed
+        // since the end of the last call to getSourceSamples
+
+        if (m_trustworthyTimestamps && !looping) {
+
+            // this adjustment seems to cause more problems when looping
+            double elapsed = currentTime - lastRetrievalTimestamp;
+
+            if (elapsed > 0.0) {
+                sincerequest_t = RealTime::fromSeconds(elapsed);
+            }
+        }
+
+    } else {
+
+        lastretrieved_t = RealTime::frame2RealTime
+            (getTargetBlockSize(), targetRate);
+    }
+
+    RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, sourceRate);
+
+    if (timeRatio != 1.0) {
+        lastretrieved_t = lastretrieved_t / timeRatio;
+        sincerequest_t = sincerequest_t / timeRatio;
+        latency_t = latency_t / timeRatio;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n  stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n  since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl;
+#endif
+
+    // Normally the range lists should contain at least one item each
+    // -- if playback is unconstrained, that item should report the
+    // entire source audio duration.
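+    //
+    // For example, with play-selection mode on and selections covering
+    // 1.0-2.5s and 4.0-6.0s (and ignoring any playback alignment),
+    // rebuildRangeLists() leaves m_rangeStarts = { 1.0, 4.0 } and
+    // m_rangeDurations = { 1.5, 2.0 }.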
+
+    if (m_rangeStarts.empty()) {
+        rebuildRangeLists();
+    }
+
+    if (m_rangeStarts.empty()) {
+        // this code is only used in case of error in rebuildRangeLists
+        RealTime playing_t = bufferedto_t
+            - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
+            + sincerequest_t;
+        if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime;
+        sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
+        return m_viewManager->alignPlaybackFrameToReference(frame);
+    }
+
+    int inRange = 0;
+    int index = 0;
+
+    for (int i = 0; i < (int)m_rangeStarts.size(); ++i) {
+        if (bufferedto_t >= m_rangeStarts[i]) {
+            inRange = index;
+        } else {
+            break;
+        }
+        ++index;
+    }
+
+    if (inRange >= int(m_rangeStarts.size())) {
+        inRange = int(m_rangeStarts.size())-1;
+    }
+
+    RealTime playing_t = bufferedto_t;
+
+    playing_t = playing_t
+        - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
+        + sincerequest_t;
+
+    // This rather gross little hack is used to ensure that latency
+    // compensation doesn't result in the playback pointer appearing
+    // to start earlier than the actual playback does.  It doesn't
+    // work properly (hence the bail-out in the middle) because if we
+    // are playing a relatively short looped region, the playing time
+    // estimated from the buffer fill frame may have wrapped around
+    // the region boundary and end up being much smaller than the
+    // theoretical play start frame, perhaps even for the entire
+    // duration of playback!
+
+    if (!m_playStartFramePassed) {
+        RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame,
+                                                        sourceRate);
+        if (playing_t < playstart_t) {
+//            cerr << "playing_t " << playing_t << " < playstart_t " 
+//                      << playstart_t << endl;
+            if (/*!!! sincerequest_t > RealTime::zeroTime && */
+                m_playStartedAt + latency_t + stretchlat_t <
+                RealTime::fromSeconds(currentTime)) {
+//                cerr << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << endl;
+                m_playStartFramePassed = true;
+            } else {
+                playing_t = playstart_t;
+            }
+        } else {
+            m_playStartFramePassed = true;
+        }
+    }
+ 
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    cerr << "playing_t " << playing_t;
+#endif
+
+    playing_t = playing_t - m_rangeStarts[inRange];
+ 
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    cerr << " as offset into range " << inRange << " (start =" << m_rangeStarts[inRange] << " duration =" << m_rangeDurations[inRange] << ") = " << playing_t << endl;
+#endif
+
+    while (playing_t < RealTime::zeroTime) {
+
+        if (inRange == 0) {
+            if (looping) {
+                inRange = int(m_rangeStarts.size()) - 1;
+            } else {
+                break;
+            }
+        } else {
+            --inRange;
+        }
+
+        playing_t = playing_t + m_rangeDurations[inRange];
+    }
+
+    playing_t = playing_t + m_rangeStarts[inRange];
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    cerr << "  playing time: " << playing_t << endl;
+#endif
+
+    if (!looping) {
+        if (inRange == (int)m_rangeStarts.size()-1 &&
+            playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) {
+cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl;
+            stop();
+        }
+    }
+
+    if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime;
+
+    sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
+
+    if (m_lastCurrentFrame > 0 && !looping) {
+        if (frame < m_lastCurrentFrame) {
+            frame = m_lastCurrentFrame;
+        }
+    }
+
+    m_lastCurrentFrame = frame;
+
+    return m_viewManager->alignPlaybackFrameToReference(frame);
+}
+
+void
+AudioCallbackPlaySource::rebuildRangeLists()
+{
+    bool constrained = (m_viewManager->getPlaySelectionMode());
+
+    m_rangeStarts.clear();
+    m_rangeDurations.clear();
+
+    sv_samplerate_t sourceRate = getSourceSampleRate();
+    if (sourceRate == 0) return;
+
+    RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate);
+    if (end == RealTime::zeroTime) return;
+
+    if (!constrained) {
+        m_rangeStarts.push_back(RealTime::zeroTime);
+        m_rangeDurations.push_back(end);
+        return;
+    }
+
+    MultiSelection::SelectionList selections = m_viewManager->getSelections();
+    MultiSelection::SelectionList::const_iterator i;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::rebuildRangeLists" << endl;
+#endif
+
+    if (!selections.empty()) {
+
+        for (i = selections.begin(); i != selections.end(); ++i) {
+            
+            RealTime start =
+                (RealTime::frame2RealTime
+                 (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
+                  sourceRate));
+            RealTime duration = 
+                (RealTime::frame2RealTime
+                 (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) -
+                  m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
+                  sourceRate));
+            
+            m_rangeStarts.push_back(start);
+            m_rangeDurations.push_back(duration);
+        }
+    } else {
+        m_rangeStarts.push_back(RealTime::zeroTime);
+        m_rangeDurations.push_back(end);
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cerr << "Now have " << m_rangeStarts.size() << " play ranges" << endl;
+#endif
+}
+
+void
+AudioCallbackPlaySource::setOutputLevels(float left, float right)
+{
+    m_outputLeft = left;
+    m_outputRight = right;
+}
+
+bool
+AudioCallbackPlaySource::getOutputLevels(float &left, float &right)
+{
+    left = m_outputLeft;
+    right = m_outputRight;
+    return true;
+}
+
+void
+AudioCallbackPlaySource::setSystemPlaybackSampleRate(int sr)
+{
+    bool first = (m_targetSampleRate == 0);
+
+    m_targetSampleRate = sr;
+    initialiseConverter();
+
+    if (first && (m_stretchRatio != 1.f)) {
+        // couldn't create a stretcher before because we had no sample
+        // rate: make one now
+        setTimeStretch(m_stretchRatio);
+    }
+}
+
+void
+AudioCallbackPlaySource::initialiseConverter()
+{
+    m_mutex.lock();
+
+    if (m_converter) {
+        src_delete(m_converter);
+        src_delete(m_crapConverter);
+        m_converter = 0;
+        m_crapConverter = 0;
+    }
+
+    if (getSourceSampleRate() != getTargetSampleRate()) {
+
+	int err = 0;
+
+	m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY :
+                              m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY :
+                              m_resampleQuality == 0 ? SRC_SINC_FASTEST :
+                                                       SRC_SINC_MEDIUM_QUALITY,
+			      getTargetChannelCount(), &err);
+
+        if (m_converter) {
+            m_crapConverter = src_new(SRC_LINEAR,
+                                      getTargetChannelCount(),
+                                      &err);
+        }
+
+	if (!m_converter || !m_crapConverter) {
+	    cerr
+		<< "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: "
+		<< src_strerror(err) << endl;
+
+            if (m_converter) {
+                src_delete(m_converter);
+                m_converter = 0;
+            } 
+
+            if (m_crapConverter) {
+                src_delete(m_crapConverter);
+                m_crapConverter = 0;
+            }
+
+            m_mutex.unlock();
+
+            emit sampleRateMismatch(getSourceSampleRate(),
+                                    getTargetSampleRate(),
+                                    false);
+	} else {
+
+            m_mutex.unlock();
+
+            emit sampleRateMismatch(getSourceSampleRate(),
+                                    getTargetSampleRate(),
+                                    true);
+        }
+    } else {
+        m_mutex.unlock();
+    }
+}
+
+void
+AudioCallbackPlaySource::setResampleQuality(int q)
+{
+    if (q == m_resampleQuality) return;
+    m_resampleQuality = q;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    SVDEBUG << "AudioCallbackPlaySource::setResampleQuality: setting to "
+              << m_resampleQuality << endl;
+#endif
+
+    initialiseConverter();
+}
+
+void
+AudioCallbackPlaySource::setAuditioningEffect(Auditionable *a)
+{
+    RealTimePluginInstance *plugin = dynamic_cast<RealTimePluginInstance *>(a);
+    if (a && !plugin) {
+        cerr << "WARNING: AudioCallbackPlaySource::setAuditioningEffect: auditionable object " << a << " is not a real-time plugin instance" << endl;
+    }
+
+    m_mutex.lock();
+    m_auditioningPlugin = plugin;
+    m_auditioningPluginBypassed = false;
+    m_mutex.unlock();
+}
+
+void
+AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
+{
+    m_audioGenerator->setSoloModelSet(s);
+    clearRingBuffers();
+}
+
+void
+AudioCallbackPlaySource::clearSoloModelSet()
+{
+    m_audioGenerator->clearSoloModelSet();
+    clearRingBuffers();
+}
+
+sv_samplerate_t
+AudioCallbackPlaySource::getTargetSampleRate() const
+{
+    if (m_targetSampleRate) return m_targetSampleRate;
+    else return getSourceSampleRate();
+}
+
+int
+AudioCallbackPlaySource::getSourceChannelCount() const
+{
+    return m_sourceChannelCount;
+}
+
+int
+AudioCallbackPlaySource::getTargetChannelCount() const
+{
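+    // Always report at least a stereo channel count to the playback
+    // target, even when the source material is mono.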
+    if (m_sourceChannelCount < 2) return 2;
+    return m_sourceChannelCount;
+}
+
+sv_samplerate_t
+AudioCallbackPlaySource::getSourceSampleRate() const
+{
+    return m_sourceSampleRate;
+}
+
+void
+AudioCallbackPlaySource::setTimeStretch(double factor)
+{
+    m_stretchRatio = factor;
+
+    if (!getTargetSampleRate()) return; // have to make our stretcher later
+
+    if (m_timeStretcher || (factor == 1.0)) {
+        // stretch ratio will be set in next process call if appropriate
+    } else {
+        m_stretcherInputCount = getTargetChannelCount();
+        RubberBandStretcher *stretcher = new RubberBandStretcher
+            (int(getTargetSampleRate()),
+             m_stretcherInputCount,
+             RubberBandStretcher::OptionProcessRealTime,
+             factor);
+        RubberBandStretcher *monoStretcher = new RubberBandStretcher
+            (int(getTargetSampleRate()),
+             1,
+             RubberBandStretcher::OptionProcessRealTime,
+             factor);
+        m_stretcherInputs = new float *[m_stretcherInputCount];
+        m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount];
+        for (int c = 0; c < m_stretcherInputCount; ++c) {
+            m_stretcherInputSizes[c] = 16384;
+            m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
+        }
+        m_monoStretcher = monoStretcher;
+        m_timeStretcher = stretcher;
+    }
+
+    emit activity(tr("Change time-stretch factor to %1").arg(factor));
+}
+
+void
+AudioCallbackPlaySource::getSourceSamples(int count, float **buffer)
+{
+    if (!m_playing) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+        SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl;
+#endif
+	for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
+	    for (int i = 0; i < count; ++i) {
+		buffer[ch][i] = 0.0;
+	    }
+	}
+	return;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Playing" << endl;
+#endif
+
+    // Ensure that all buffers have at least the amount of data we
+    // need -- else reduce the size of our requests correspondingly
+
+    for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
+
+        RingBuffer<float> *rb = getReadRingBuffer(ch);
+        
+        if (!rb) {
+            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
+                      << "No ring buffer available for channel " << ch
+                      << ", returning no data here" << endl;
+            count = 0;
+            break;
+        }
+
+        int rs = rb->getReadSpace();
+        if (rs < count) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
+                      << "Ring buffer for channel " << ch << " has only "
+                      << rs << " (of " << count << ") samples available ("
+                      << "ring buffer size is " << rb->getSize() << ", write "
+                      << "space " << rb->getWriteSpace() << "), "
+                      << "reducing request size" << endl;
+#endif
+            count = rs;
+        }
+    }
+
+    if (count == 0) return;
+
+    RubberBandStretcher *ts = m_timeStretcher;
+    RubberBandStretcher *ms = m_monoStretcher;
+
+    double ratio = ts ? ts->getTimeRatio() : 1.0;
+
+    if (ratio != m_stretchRatio) {
+        if (!ts) {
+            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl;
+            m_stretchRatio = 1.0;
+        } else {
+            ts->setTimeRatio(m_stretchRatio);
+            if (ms) ms->setTimeRatio(m_stretchRatio);
+            if (m_stretchRatio >= 1.0) m_stretchMono = false;
+        }
+    }
+
+    int stretchChannels = m_stretcherInputCount;
+    if (m_stretchMono) {
+        if (ms) {
+            ts = ms;
+            stretchChannels = 1;
+        } else {
+            m_stretchMono = false;
+        }
+    }
+
+    if (m_target) {
+        m_lastRetrievedBlockSize = count;
+        m_lastRetrievalTimestamp = m_target->getCurrentTime();
+    }
+
+    if (!ts || ratio == 1.f) {
+
+	int got = 0;
+
+	for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
+
+	    RingBuffer<float> *rb = getReadRingBuffer(ch);
+
+	    if (rb) {
+
+		// this is marginally more likely to leave our channels in
+		// sync after a processing failure than just passing "count":
+		sv_frame_t request = count;
+		if (ch > 0) request = got;
+
+		got = rb->read(buffer[ch], int(request));
+	    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+		cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl;
+#endif
+	    }
+
+	    // zero-fill whatever we failed to obtain for this channel
+	    for (int i = got; i < count; ++i) {
+		buffer[ch][i] = 0.0;
+	    }
+	}
+
+        applyAuditioningEffect(count, buffer);
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::getSamples: awakening thread" << endl;
+#endif
+
+        m_condition.wakeAll();
+
+	return;
+    }
+
+    int channels = getTargetChannelCount();
+    sv_frame_t available;
+    sv_frame_t fedToStretcher = 0;
+    int warned = 0;
+
+    // The input block for a given output is approx output / ratio,
+    // but we can't predict it exactly, for an adaptive timestretcher.
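+    //
+    // (For example, at a time ratio of 2.0 -- half speed -- roughly
+    // count/2 input frames are needed to produce count output frames,
+    // though the stretcher may ask for more via getSamplesRequired().)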
+
+    while ((available = ts->available()) < count) {
+
+        sv_frame_t reqd = lrint(double(count - available) / ratio);
+        reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired()));
+        if (reqd == 0) reqd = 1;
+                
+        sv_frame_t got = reqd;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+        cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl;
+#endif
+
+        for (int c = 0; c < channels; ++c) {
+            if (c >= m_stretcherInputCount) continue;
+            if (reqd > m_stretcherInputSizes[c]) {
+                if (c == 0) {
+                    cerr << "WARNING: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << endl;
+                }
+                delete[] m_stretcherInputs[c];
+                m_stretcherInputSizes[c] = reqd * 2;
+                m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
+            }
+        }
+
+        for (int c = 0; c < channels; ++c) {
+            if (c >= m_stretcherInputCount) continue;
+            RingBuffer<float> *rb = getReadRingBuffer(c);
+            if (rb) {
+                sv_frame_t gotHere;
+                if (stretchChannels == 1 && c > 0) {
+                    gotHere = rb->readAdding(m_stretcherInputs[0], int(got));
+                } else {
+                    gotHere = rb->read(m_stretcherInputs[c], int(got));
+                }
+                if (gotHere < got) got = gotHere;
+                
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+                if (c == 0) {
+                    SVDEBUG << "feeding stretcher: got " << gotHere
+                              << ", " << rb->getReadSpace() << " remain" << endl;
+                }
+#endif
+                
+            } else {
+                cerr << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << endl;
+            }
+        }
+
+        if (got < reqd) {
+            cerr << "WARNING: Read underrun in playback ("
+                      << got << " < " << reqd << ")" << endl;
+        }
+
+        ts->process(m_stretcherInputs, size_t(got), false);
+
+        fedToStretcher += got;
+
+        if (got == 0) break;
+
+        if (ts->available() == available) {
+            cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << endl;
+            if (++warned == 5) break;
+        }
+    }
+
+    ts->retrieve(buffer, size_t(count));
+
+    for (int c = stretchChannels; c < getTargetChannelCount(); ++c) {
+        for (int i = 0; i < count; ++i) {
+            buffer[c][i] = buffer[0][i];
+        }
+    }
+
+    applyAuditioningEffect(count, buffer);
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySource::getSamples [stretched]: awakening thread" << endl;
+#endif
+
+    m_condition.wakeAll();
+
+    return;
+}
+
+void
+AudioCallbackPlaySource::applyAuditioningEffect(sv_frame_t count, float **buffers)
+{
+    if (m_auditioningPluginBypassed) return;
+    RealTimePluginInstance *plugin = m_auditioningPlugin;
+    if (!plugin) return;
+    
+    if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) {
+//        cerr << "plugin input count " << plugin->getAudioInputCount() 
+//                  << " != our channel count " << getTargetChannelCount()
+//                  << endl;
+        return;
+    }
+    if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) {
+//        cerr << "plugin output count " << plugin->getAudioOutputCount() 
+//                  << " != our channel count " << getTargetChannelCount()
+//                  << endl;
+        return;
+    }
+    if ((int)plugin->getBufferSize() < count) {
+//        cerr << "plugin buffer size " << plugin->getBufferSize() 
+//                  << " < our block size " << count
+//                  << endl;
+        return;
+    }
+
+    float **ib = plugin->getAudioInputBuffers();
+    float **ob = plugin->getAudioOutputBuffers();
+
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+        for (int i = 0; i < count; ++i) {
+            ib[c][i] = buffers[c][i];
+        }
+    }
+
+    plugin->run(Vamp::RealTime::zeroTime, int(count));
+    
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+        for (int i = 0; i < count; ++i) {
+            buffers[c][i] = ob[c][i];
+        }
+    }
+}    
+
+// Called from fill thread, m_playing true, mutex held
+bool
+AudioCallbackPlaySource::fillBuffers()
+{
+    static float *tmp = 0;
+    static sv_frame_t tmpSize = 0;
+
+    sv_frame_t space = 0;
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    sv_frame_t spaceHere = wb->getWriteSpace();
+	    if (c == 0 || spaceHere < space) space = spaceHere;
+	}
+    }
+    
+    if (space == 0) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+        cout << "AudioCallbackPlaySourceFillThread: no space to fill" << endl;
+#endif
+        return false;
+    }
+
+    sv_frame_t f = m_writeBufferFill;
+	
+    bool readWriteEqual = (m_readBuffers == m_writeBuffers);
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    if (!readWriteEqual) {
+        cout << "AudioCallbackPlaySourceFillThread: note read buffers != write buffers" << endl;
+    }
+    cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << endl;
+#endif
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "buffered to " << f << " already" << endl;
+#endif
+
+    bool resample = (getSourceSampleRate() != getTargetSampleRate());
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << endl;
+#endif
+
+    int channels = getTargetChannelCount();
+
+    sv_frame_t orig = space;
+    sv_frame_t got = 0;
+
+    static float **bufferPtrs = 0;
+    static int bufferPtrCount = 0;
+
+    if (bufferPtrCount < channels) {
+	if (bufferPtrs) delete[] bufferPtrs;
+	bufferPtrs = new float *[channels];
+	bufferPtrCount = channels;
+    }
+
+    sv_frame_t generatorBlockSize = m_audioGenerator->getBlockSize();
+
+    if (resample && !m_converter) {
+	static bool warned = false;
+	if (!warned) {
+	    cerr << "WARNING: sample rates differ, but no converter available!" << endl;
+	    warned = true;
+	}
+    }
+
+    if (resample && m_converter) {
+
+	double ratio =
+	    double(getTargetSampleRate()) / double(getSourceSampleRate());
+	orig = sv_frame_t(double(orig) / ratio + 0.1);
+
+	// orig must be a multiple of generatorBlockSize
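+	// (e.g. with a generator block size of 1024, a request for 5000
+	// frames is rounded down to 4096)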
+	orig = (orig / generatorBlockSize) * generatorBlockSize;
+	if (orig == 0) return false;
+
+	sv_frame_t work = std::max(orig, space);
+
+	// We only allocate one buffer, but we use it in two halves.
+	// We place the non-interleaved values in the second half of
+	// the buffer (orig samples for channel 0, orig samples for
+	// channel 1 etc), and then interleave them into the first
+	// half of the buffer.  Then we resample back into the second
+	// half (interleaved) and de-interleave the results back to
+	// the start of the buffer for insertion into the ringbuffers.
+	// What a faff -- especially as we've already de-interleaved
+	// the audio data from the source file elsewhere before we
+	// even reach this point.
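+	//
+	// Illustration (layout only, sizes assumed): with 2 channels and
+	// orig = 4, the second half of tmp first holds
+	// L0 L1 L2 L3 R0 R1 R2 R3 (non-interleaved); this is interleaved
+	// into the first half as L0 R0 L1 R1 L2 R2 L3 R3, resampled by
+	// libsamplerate back into the second half (still interleaved),
+	// and finally de-interleaved channel by channel into the start of
+	// tmp for the ring buffer writes.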
+	
+	if (tmpSize < channels * work * 2) {
+	    delete[] tmp;
+	    tmp = new float[channels * work * 2];
+	    tmpSize = channels * work * 2;
+	}
+
+	float *nonintlv = tmp + channels * work;
+	float *intlv = tmp;
+	float *srcout = tmp + channels * work;
+	
+	for (int c = 0; c < channels; ++c) {
+	    for (int i = 0; i < orig; ++i) {
+		nonintlv[channels * i + c] = 0.0f;
+	    }
+	}
+
+	for (int c = 0; c < channels; ++c) {
+	    bufferPtrs[c] = nonintlv + c * orig;
+	}
+
+	got = mixModels(f, orig, bufferPtrs); // also modifies f
+
+	// and interleave into first half
+	for (int c = 0; c < channels; ++c) {
+	    for (int i = 0; i < got; ++i) {
+		float sample = nonintlv[c * got + i];
+		intlv[channels * i + c] = sample;
+	    }
+	}
+		
+	SRC_DATA data;
+	data.data_in = intlv;
+	data.data_out = srcout;
+	data.input_frames = long(got);
+	data.output_frames = long(work);
+	data.src_ratio = ratio;
+	data.end_of_input = 0;
+	
+	int err = 0;
+
+        if (m_timeStretcher && m_timeStretcher->getTimeRatio() < 0.4) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            cout << "Using crappy converter" << endl;
+#endif
+            err = src_process(m_crapConverter, &data);
+        } else {
+            err = src_process(m_converter, &data);
+        }
+
+	sv_frame_t toCopy = sv_frame_t(double(got) * ratio + 0.1);
+
+	if (err) {
+	    cerr
+		<< "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
+		<< src_strerror(err) << endl;
+	    //!!! Then what?
+	} else {
+	    got = data.input_frames_used;
+	    toCopy = data.output_frames_gen;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    cout << "Resampled " << got << " frames to " << toCopy << " frames" << endl;
+#endif
+	}
+	
+	for (int c = 0; c < channels; ++c) {
+	    for (int i = 0; i < toCopy; ++i) {
+		tmp[i] = srcout[channels * i + c];
+	    }
+	    RingBuffer<float> *wb = getWriteRingBuffer(c);
+	    if (wb) wb->write(tmp, int(toCopy));
+	}
+
+	m_writeBufferFill = f;
+	if (readWriteEqual) m_readBufferFill = f;
+
+    } else {
+
+	// space must be a multiple of generatorBlockSize
+        sv_frame_t reqSpace = space;
+	space = (reqSpace / generatorBlockSize) * generatorBlockSize;
+	if (space == 0) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            cout << "requested fill of " << reqSpace
+                      << " is less than generator block size of "
+                      << generatorBlockSize << ", leaving it" << endl;
+#endif
+            return false;
+        }
+
+	if (tmpSize < channels * space) {
+	    delete[] tmp;
+	    tmp = new float[channels * space];
+	    tmpSize = channels * space;
+	}
+
+	for (int c = 0; c < channels; ++c) {
+
+	    bufferPtrs[c] = tmp + c * space;
+	    
+	    for (int i = 0; i < space; ++i) {
+		tmp[c * space + i] = 0.0f;
+	    }
+	}
+
+	sv_frame_t got = mixModels(f, space, bufferPtrs); // also modifies f
+
+	for (int c = 0; c < channels; ++c) {
+
+	    RingBuffer<float> *wb = getWriteRingBuffer(c);
+	    if (wb) {
+                int actual = wb->write(bufferPtrs[c], int(got));
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+		cout << "Wrote " << actual << " samples for ch " << c << ", now "
+			  << wb->getReadSpace() << " to read" 
+			  << endl;
+#endif
+                if (actual < got) {
+                    cerr << "WARNING: Buffer overrun in channel " << c
+                              << ": wrote " << actual << " of " << got
+                              << " samples" << endl;
+                }
+            }
+	}
+
+	m_writeBufferFill = f;
+	if (readWriteEqual) m_readBufferFill = f;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+        cout << "Read buffer fill is now " << m_readBufferFill << endl;
+#endif
+
+	//!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
+    }
+
+    return true;
+}    
+
+sv_frame_t
+AudioCallbackPlaySource::mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers)
+{
+    sv_frame_t processed = 0;
+    sv_frame_t chunkStart = frame;
+    sv_frame_t chunkSize = count;
+    sv_frame_t selectionSize = 0;
+    sv_frame_t nextChunkStart = chunkStart + chunkSize;
+    
+    bool looping = m_viewManager->getPlayLoopMode();
+    bool constrained = (m_viewManager->getPlaySelectionMode() &&
+			!m_viewManager->getSelections().empty());
+
+    static float **chunkBufferPtrs = 0;
+    static int chunkBufferPtrCount = 0;
+    int channels = getTargetChannelCount();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << endl;
+#endif
+
+    if (chunkBufferPtrCount < channels) {
+	if (chunkBufferPtrs) delete[] chunkBufferPtrs;
+	chunkBufferPtrs = new float *[channels];
+	chunkBufferPtrCount = channels;
+    }
+
+    for (int c = 0; c < channels; ++c) {
+	chunkBufferPtrs[c] = buffers[c];
+    }
+
+    while (processed < count) {
+	
+	chunkSize = count - processed;
+	nextChunkStart = chunkStart + chunkSize;
+	selectionSize = 0;
+
+	sv_frame_t fadeIn = 0, fadeOut = 0;
+
+	if (constrained) {
+
+            sv_frame_t rChunkStart =
+                m_viewManager->alignPlaybackFrameToReference(chunkStart);
+	    
+	    Selection selection =
+		m_viewManager->getContainingSelection(rChunkStart, true);
+	    
+	    if (selection.isEmpty()) {
+		if (looping) {
+		    selection = *m_viewManager->getSelections().begin();
+		    chunkStart = m_viewManager->alignReferenceToPlaybackFrame
+                        (selection.getStartFrame());
+		    fadeIn = 50;
+		}
+	    }
+
+	    if (selection.isEmpty()) {
+
+		chunkSize = 0;
+		nextChunkStart = chunkStart;
+
+	    } else {
+
+                sv_frame_t sf = m_viewManager->alignReferenceToPlaybackFrame
+                    (selection.getStartFrame());
+                sv_frame_t ef = m_viewManager->alignReferenceToPlaybackFrame
+                    (selection.getEndFrame());
+
+		selectionSize = ef - sf;
+
+		if (chunkStart < sf) {
+		    chunkStart = sf;
+		    fadeIn = 50;
+		}
+
+		nextChunkStart = chunkStart + chunkSize;
+
+		if (nextChunkStart >= ef) {
+		    nextChunkStart = ef;
+		    fadeOut = 50;
+		}
+
+		chunkSize = nextChunkStart - chunkStart;
+	    }
+	
+	} else if (looping && m_lastModelEndFrame > 0) {
+
+	    if (chunkStart >= m_lastModelEndFrame) {
+		chunkStart = 0;
+	    }
+	    if (chunkSize > m_lastModelEndFrame - chunkStart) {
+		chunkSize = m_lastModelEndFrame - chunkStart;
+	    }
+	    nextChunkStart = chunkStart + chunkSize;
+	}
+	
+//	cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << endl;
+
+	if (!chunkSize) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    cout << "Ending selection playback at " << nextChunkStart << endl;
+#endif
+	    // We need to maintain full buffers so that the other
+	    // thread can tell where it's got to in the playback -- so
+	    // return the full amount here
+	    frame = frame + count;
+	    return count;
+	}
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl;
+#endif
+
+	if (selectionSize < 100) {
+	    fadeIn = 0;
+	    fadeOut = 0;
+	} else if (selectionSize < 300) {
+	    if (fadeIn > 0) fadeIn = 10;
+	    if (fadeOut > 0) fadeOut = 10;
+	}
+
+	if (fadeIn > 0) {
+	    if (processed * 2 < fadeIn) {
+		fadeIn = processed * 2;
+	    }
+	}
+
+	if (fadeOut > 0) {
+	    if ((count - processed - chunkSize) * 2 < fadeOut) {
+		fadeOut = (count - processed - chunkSize) * 2;
+	    }
+	}
+
+	for (std::set<Model *>::iterator mi = m_models.begin();
+	     mi != m_models.end(); ++mi) {
+	    
+	    (void) m_audioGenerator->mixModel(*mi, chunkStart, 
+                                              chunkSize, chunkBufferPtrs,
+                                              fadeIn, fadeOut);
+	}
+
+	for (int c = 0; c < channels; ++c) {
+	    chunkBufferPtrs[c] += chunkSize;
+	}
+
+	processed += chunkSize;
+	chunkStart = nextChunkStart;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << endl;
+#endif
+
+    frame = nextChunkStart;
+    return processed;
+}
+
+void
+AudioCallbackPlaySource::unifyRingBuffers()
+{
+    if (m_readBuffers == m_writeBuffers) return;
+
+    // only unify if there will be something to read
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    if (wb->getReadSpace() < m_blockSize * 2) {
+		if ((m_writeBufferFill + m_blockSize * 2) < 
+		    m_lastModelEndFrame) {
+		    // OK, we don't have enough and there's more to
+		    // read -- don't unify until we can do better
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+                    SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: Not unifying: write buffer has less (" << wb->getReadSpace() << ") than " << m_blockSize*2 << " to read and write buffer fill (" << m_writeBufferFill << ") is not close to end frame (" << m_lastModelEndFrame << ")" << endl;
+#endif
+		    return;
+		}
+	    }
+	    break;
+	}
+    }
+
+    sv_frame_t rf = m_readBufferFill;
+    RingBuffer<float> *rb = getReadRingBuffer(0);
+    if (rb) {
+	int rs = rb->getReadSpace();
+	//!!! incorrect when in non-contiguous selection, see comments elsewhere
+//	cout << "rs = " << rs << endl;
+	if (rs < rf) rf -= rs;
+	else rf = 0;
+    }
+    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl;
+#endif
+
+    sv_frame_t wf = m_writeBufferFill;
+    sv_frame_t skip = 0;
+    for (int c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    if (c == 0) {
+		
+		int wrs = wb->getReadSpace();
+//		cout << "wrs = " << wrs << endl;
+
+		if (wrs < wf) wf -= wrs;
+		else wf = 0;
+//		cout << "wf = " << wf << endl;
+		
+		if (wf < rf) skip = rf - wf;
+		if (skip == 0) break;
+	    }
+
+//	    cout << "skipping " << skip << endl;
+	    wb->skip(int(skip));
+	}
+    }
+		    
+    m_bufferScavenger.claim(m_readBuffers);
+    m_readBuffers = m_writeBuffers;
+    m_readBufferFill = m_writeBufferFill;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    cerr << "unified" << endl;
+#endif
+}
+
+void
+AudioCallbackPlaySource::FillThread::run()
+{
+    AudioCallbackPlaySource &s(m_source);
+    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    cout << "AudioCallbackPlaySourceFillThread starting" << endl;
+#endif
+
+    s.m_mutex.lock();
+
+    bool previouslyPlaying = s.m_playing;
+    bool work = false;
+
+    while (!s.m_exiting) {
+
+	s.unifyRingBuffers();
+	s.m_bufferScavenger.scavenge();
+        s.m_pluginScavenger.scavenge();
+
+	if (work && s.m_playing && s.getSourceSampleRate()) {
+	    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    cout << "AudioCallbackPlaySourceFillThread: not waiting" << endl;
+#endif
+
+	    s.m_mutex.unlock();
+	    s.m_mutex.lock();
+
+	} else {
+	    
+	    double ms = 100;
+	    if (s.getSourceSampleRate() > 0) {
+		ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0;
+	    }
+	    
+	    if (s.m_playing) ms /= 10;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            if (!s.m_playing) cout << endl;
+	    cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << endl;
+#endif
+	    
+	    s.m_condition.wait(&s.m_mutex, int(ms));
+	}
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	cout << "AudioCallbackPlaySourceFillThread: awoken" << endl;
+#endif
+
+	work = false;
+
+	if (!s.getSourceSampleRate()) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            cout << "AudioCallbackPlaySourceFillThread: source sample rate is zero" << endl;
+#endif
+            continue;
+        }
+
+	bool playing = s.m_playing;
+
+	if (playing && !previouslyPlaying) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl;
+#endif
+	    for (int c = 0; c < s.getTargetChannelCount(); ++c) {
+		RingBuffer<float> *rb = s.getReadRingBuffer(c);
+		if (rb) rb->reset();
+	    }
+	}
+	previouslyPlaying = playing;
+
+	work = s.fillBuffers();
+    }
+
+    s.m_mutex.unlock();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/AudioCallbackPlaySource.h	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,407 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_CALLBACK_PLAY_SOURCE_H_
+#define _AUDIO_CALLBACK_PLAY_SOURCE_H_
+
+#include "base/RingBuffer.h"
+#include "base/AudioPlaySource.h"
+#include "base/PropertyContainer.h"
+#include "base/Scavenger.h"
+
+#include <bqaudioio/ApplicationPlaybackSource.h>
+
+#include <QObject>
+#include <QMutex>
+#include <QWaitCondition>
+
+#include "base/Thread.h"
+#include "base/RealTime.h"
+
+#include <samplerate.h>
+
+#include <set>
+#include <map>
+
+namespace RubberBand {
+    class RubberBandStretcher;
+}
+
+class Model;
+class ViewManagerBase;
+class AudioGenerator;
+class PlayParameters;
+class RealTimePluginInstance;
+class AudioCallbackPlayTarget;
+
+/**
+ * AudioCallbackPlaySource manages audio data supply to callback-based
+ * audio APIs such as JACK or CoreAudio.  It maintains one ring buffer
+ * per channel, filled during playback by a non-realtime thread, and
+ * provides a method for a realtime thread to pick up the latest
+ * available sample data from these buffers.
+ */
+class AudioCallbackPlaySource : public QObject,
+				public AudioPlaySource,
+                                public breakfastquay::ApplicationPlaybackSource
+{
+    Q_OBJECT
+
+public:
+    AudioCallbackPlaySource(ViewManagerBase *, QString clientName);
+    virtual ~AudioCallbackPlaySource();
+    
+    /**
+     * Add a data model to be played from.  The source can mix
+     * playback from a number of sources including dense and sparse
+     * models.  The models must match in sample rate, but they don't
+     * have to have identical numbers of channels.
+     */
+    virtual void addModel(Model *model);
+
+    /**
+     * Remove a model.
+     */
+    virtual void removeModel(Model *model);
+
+    /**
+     * Remove all models.  (Silence will ensue.)
+     */
+    virtual void clearModels();
+
+    /**
+     * Start making data available in the ring buffers for playback,
+     * from the given frame.  If playback is already under way, reseek
+     * to the given frame and continue.
+     */
+    virtual void play(sv_frame_t startFrame);
+
+    /**
+     * Stop playback and ensure that no more data is returned.
+     */
+    virtual void stop();
+
+    /**
+     * Return whether playback is currently supposed to be happening.
+     */
+    virtual bool isPlaying() const { return m_playing; }
+
+    /**
+     * Return the frame number that is currently expected to be coming
+     * out of the speakers.  (i.e. compensating for playback latency.)
+     */
+    virtual sv_frame_t getCurrentPlayingFrame();
+    
+    /** 
+     * Return the last frame that would come out of the speakers if we
+     * stopped playback right now.
+     */
+    virtual sv_frame_t getCurrentBufferedFrame();
+
+    /**
+     * Return the frame at which playback is expected to end (if not looping).
+     */
+    virtual sv_frame_t getPlayEndFrame() { return m_lastModelEndFrame; }
+
+    /**
+     * Set the playback target.  This should be called by the target
+     * class.
+     */
+    virtual void setSystemPlaybackTarget(breakfastquay::SystemPlaybackTarget *);
+
+    /**
+     * Set the block size of the target audio device.  This should be
+     * called by the target class.
+     */
+    virtual void setSystemPlaybackBlockSize(int blockSize);
+
+    /**
+     * Get the block size of the target audio device.  This may be an
+     * estimate or upper bound, if the target has a variable block
+     * size; the source should behave itself even if this value turns
+     * out to be inaccurate.
+     */
+    int getTargetBlockSize() const;
+
+    /**
+     * Set the playback latency of the target audio device, in frames
+     * at the target sample rate.  This is the difference between the
+     * frame currently "leaving the speakers" and the last frame (or
+     * highest last frame across all channels) requested via
+     * getSourceSamples().  The default is zero.
+     */
+    void setSystemPlaybackLatency(int);
+
+    /**
+     * Get the playback latency of the target audio device.
+     */
+    sv_frame_t getTargetPlayLatency() const;
+
+    /**
+     * Specify that the target audio device has a fixed sample rate
+     * (i.e. cannot accommodate arbitrary sample rates based on the
+     * source).  If the target sets this to something other than the
+     * source sample rate, this class will resample automatically to
+     * fit.
+     */
+    void setSystemPlaybackSampleRate(int);
+
+    /**
+     * Return the sample rate set by the target audio device (or the
+     * source sample rate if the target hasn't set one).
+     */
+    virtual sv_samplerate_t getTargetSampleRate() const;
+
+    /**
+     * Set the current output levels for metering (for call from the
+     * target)
+     */
+    void setOutputLevels(float left, float right);
+
+    /**
+     * Return the current (or thereabouts) output levels in the range
+     * 0.0 -> 1.0, for metering purposes.
+     */
+    virtual bool getOutputLevels(float &left, float &right);
+
+    /**
+     * Get the number of channels of audio that are in the source models.
+     * This may safely be called from a realtime thread.  Returns 0 if
+     * there is no source yet available.
+     */
+    int getSourceChannelCount() const;
+
+    /**
+     * Get the number of channels of audio that will be provided
+     * to the play target.  This may be more than the source channel
+     * count: for example, a mono source will provide 2 channels
+     * after pan.
+     * This may safely be called from a realtime thread.  Returns 0 if
+     * there is no source yet available.
+     */
+    int getTargetChannelCount() const;
+
+    /**
+     * ApplicationPlaybackSource equivalent of the above.
+     */
+    virtual int getApplicationChannelCount() const {
+        return getTargetChannelCount();
+    }
+    
+    /**
+     * Get the actual sample rate of the source material.  This may
+     * safely be called from a realtime thread.  Returns 0 if there is
+     * no source yet available.
+     */
+    virtual sv_samplerate_t getSourceSampleRate() const;
+
+    /**
+     * ApplicationPlaybackSource equivalent of the above.
+     */
+    virtual int getApplicationSampleRate() const {
+        return int(round(getSourceSampleRate()));
+    }
+
+    /**
+     * Get "count" samples (at the target sample rate) of the mixed
+     * audio data, in all channels.  This may safely be called from a
+     * realtime thread.
+     */
+    virtual void getSourceSamples(int count, float **buffer);
+
+    /**
+     * Set the time stretcher factor (i.e. playback speed).
+     */
+    void setTimeStretch(double factor);
+
+    /**
+     * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is
+     * highest quality.
+     */
+    void setResampleQuality(int q);
+
+    /**
+     * Set a single real-time plugin as a processing effect for
+     * auditioning during playback.
+     *
+     * The plugin must have been initialised with
+     * getTargetChannelCount() channels and a getTargetBlockSize()
+     * sample frame processing block size.
+     *
+     * This playback source takes ownership of the plugin, which will
+     * be deleted at some point after the following call to
+     * setAuditioningEffect (depending on real-time constraints).
+     *
+     * Pass a null pointer to remove the current auditioning plugin,
+     * if any.
+     */
+    void setAuditioningEffect(Auditionable *plugin);
+
+    /**
+     * Specify that only the given set of models should be played.
+     */
+    void setSoloModelSet(std::set<Model *>s);
+
+    /**
+     * Specify that all models should be played as normal (if not
+     * muted).
+     */
+    void clearSoloModelSet();
+
+    std::string getClientName() const { return m_clientName; }
+
+signals:
+    void modelReplaced();
+
+    void playStatusChanged(bool isPlaying);
+
+    void sampleRateMismatch(sv_samplerate_t requested,
+                            sv_samplerate_t available,
+                            bool willResample);
+
+    void audioOverloadPluginDisabled();
+    void audioTimeStretchMultiChannelDisabled();
+
+    void activity(QString);
+
+public slots:
+    void audioProcessingOverload();
+
+protected slots:
+    void selectionChanged();
+    void playLoopModeChanged();
+    void playSelectionModeChanged();
+    void playParametersChanged(PlayParameters *);
+    void preferenceChanged(PropertyContainer::PropertyName);
+    void modelChangedWithin(sv_frame_t startFrame, sv_frame_t endFrame);
+
+protected:
+    ViewManagerBase                  *m_viewManager;
+    AudioGenerator                   *m_audioGenerator;
+    std::string                       m_clientName;
+
+    class RingBufferVector : public std::vector<RingBuffer<float> *> {
+    public:
+	virtual ~RingBufferVector() {
+	    while (!empty()) {
+		delete *begin();
+		erase(begin());
+	    }
+	}
+    };
+
+    std::set<Model *>                 m_models;
+    RingBufferVector                 *m_readBuffers;
+    RingBufferVector                 *m_writeBuffers;
+    sv_frame_t                        m_readBufferFill;
+    sv_frame_t                        m_writeBufferFill;
+    Scavenger<RingBufferVector>       m_bufferScavenger;
+    int                               m_sourceChannelCount;
+    sv_frame_t                        m_blockSize;
+    sv_samplerate_t                   m_sourceSampleRate;
+    sv_samplerate_t                   m_targetSampleRate;
+    sv_frame_t                        m_playLatency;
+    breakfastquay::SystemPlaybackTarget *m_target;
+    double                            m_lastRetrievalTimestamp;
+    sv_frame_t                        m_lastRetrievedBlockSize;
+    bool                              m_trustworthyTimestamps;
+    sv_frame_t                        m_lastCurrentFrame;
+    bool                              m_playing;
+    bool                              m_exiting;
+    sv_frame_t                        m_lastModelEndFrame;
+    int                               m_ringBufferSize;
+    float                             m_outputLeft;
+    float                             m_outputRight;
+    RealTimePluginInstance           *m_auditioningPlugin;
+    bool                              m_auditioningPluginBypassed;
+    Scavenger<RealTimePluginInstance> m_pluginScavenger;
+    sv_frame_t                        m_playStartFrame;
+    bool                              m_playStartFramePassed;
+    RealTime                          m_playStartedAt;
+
+    RingBuffer<float> *getWriteRingBuffer(int c) {
+	if (m_writeBuffers && c < (int)m_writeBuffers->size()) {
+	    return (*m_writeBuffers)[c];
+	} else {
+	    return 0;
+	}
+    }
+
+    RingBuffer<float> *getReadRingBuffer(int c) {
+	RingBufferVector *rb = m_readBuffers;
+	if (rb && c < (int)rb->size()) {
+	    return (*rb)[c];
+	} else {
+	    return 0;
+	}
+    }
+
+    void clearRingBuffers(bool haveLock = false, int count = 0);
+    void unifyRingBuffers();
+
+    RubberBand::RubberBandStretcher *m_timeStretcher;
+    RubberBand::RubberBandStretcher *m_monoStretcher;
+    double m_stretchRatio;
+    bool m_stretchMono;
+    
+    int m_stretcherInputCount;
+    float **m_stretcherInputs;
+    sv_frame_t *m_stretcherInputSizes;
+
+    // Called from fill thread, m_playing true, mutex held
+    // Return true if work done
+    bool fillBuffers();
+    
+    // Called from fillBuffers.  Return the number of frames written,
+    // which will be count or fewer.  Return in the frame argument the
+    // new buffered frame position (which may be earlier than the
+    // frame argument passed in, in the case of looping).
+    sv_frame_t mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers);
+
+    // Called from getSourceSamples.
+    void applyAuditioningEffect(sv_frame_t count, float **buffers);
+
+    // Ranges of current selections, if play selection is active
+    std::vector<RealTime> m_rangeStarts;
+    std::vector<RealTime> m_rangeDurations;
+    void rebuildRangeLists();
+
+    sv_frame_t getCurrentFrame(RealTime outputLatency);
+
+    class FillThread : public Thread
+    {
+    public:
+	FillThread(AudioCallbackPlaySource &source) :
+            Thread(Thread::NonRTThread),
+	    m_source(source) { }
+
+	virtual void run();
+
+    protected:
+	AudioCallbackPlaySource &m_source;
+    };
+
+    QMutex m_mutex;
+    QWaitCondition m_condition;
+    FillThread *m_fillThread;
+    SRC_STATE *m_converter;
+    SRC_STATE *m_crapConverter; // for use when playing very fast
+    int m_resampleQuality;
+    void initialiseConverter();
+};
+
+#endif
+
+
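
As a rough illustration of the calling pattern this class supports, here is a hypothetical usage sketch (glue code written for this description, not code from this changeset): the application side registers models and starts playback, the fill thread keeps the ring buffers topped up, and the realtime callback drains them through getSourceSamples().  The function names audioCallback and startPlayback are illustrative only and are not part of the bqaudioio API.

    #include "AudioCallbackPlaySource.h"

    // Realtime side: called by a hypothetical audio callback with one
    // buffer per channel (getTargetChannelCount() of them).
    void audioCallback(AudioCallbackPlaySource *source,
                       float **channelBuffers, int nframes)
    {
        source->getSourceSamples(nframes, channelBuffers);
    }

    // Application side: add a model and start filling from frame 0.
    void startPlayback(AudioCallbackPlaySource *source, Model *model)
    {
        source->addModel(model);
        source->play(0);
    }
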
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/AudioGenerator.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,709 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioGenerator.h"
+
+#include "base/TempDirectory.h"
+#include "base/PlayParameters.h"
+#include "base/PlayParameterRepository.h"
+#include "base/Pitch.h"
+#include "base/Exceptions.h"
+
+#include "data/model/NoteModel.h"
+#include "data/model/FlexiNoteModel.h"
+#include "data/model/DenseTimeValueModel.h"
+#include "data/model/SparseTimeValueModel.h"
+#include "data/model/SparseOneDimensionalModel.h"
+#include "data/model/NoteData.h"
+
+#include "ClipMixer.h"
+#include "ContinuousSynth.h"
+
+#include <iostream>
+#include <cmath>
+
+#include <QDir>
+#include <QFile>
+
+const sv_frame_t
+AudioGenerator::m_processingBlockSize = 1024;
+
+QString
+AudioGenerator::m_sampleDir = "";
+
+//#define DEBUG_AUDIO_GENERATOR 1
+
+AudioGenerator::AudioGenerator() :
+    m_sourceSampleRate(0),
+    m_targetChannelCount(1),
+    m_waveType(0),
+    m_soloing(false),
+    m_channelBuffer(0),
+    m_channelBufSiz(0),
+    m_channelBufCount(0)
+{
+    initialiseSampleDir();
+
+    connect(PlayParameterRepository::getInstance(),
+            SIGNAL(playClipIdChanged(const Playable *, QString)),
+            this,
+            SLOT(playClipIdChanged(const Playable *, QString)));
+}
+
+AudioGenerator::~AudioGenerator()
+{
+#ifdef DEBUG_AUDIO_GENERATOR
+    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
+#endif
+}
+
+void
+AudioGenerator::initialiseSampleDir()
+{
+    if (m_sampleDir != "") return;
+
+    try {
+        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
+    } catch (DirectoryCreationFailed f) {
+        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
+                  << " Failed to create temporary sample directory"
+                  << endl;
+        m_sampleDir = "";
+        return;
+    }
+
+    QDir sampleResourceDir(":/samples", "*.wav");
+
+    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
+
+        QString fileName(sampleResourceDir[i]);
+        QFile file(sampleResourceDir.filePath(fileName));
+        QString target = QDir(m_sampleDir).filePath(fileName);
+
+        if (!file.copy(target)) {
+            cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
+                      << "Unable to copy " << fileName
+                      << " into temporary directory \""
+                      << m_sampleDir << "\"" << endl;
+        } else {
+            QFile tf(target);
+            tf.setPermissions(tf.permissions() |
+                              QFile::WriteOwner |
+                              QFile::WriteUser);
+        }
+    }
+}
+
+bool
+AudioGenerator::addModel(Model *model)
+{
+    if (m_sourceSampleRate == 0) {
+
+	m_sourceSampleRate = model->getSampleRate();
+
+    } else {
+
+	DenseTimeValueModel *dtvm =
+	    dynamic_cast<DenseTimeValueModel *>(model);
+
+	if (dtvm) {
+	    m_sourceSampleRate = model->getSampleRate();
+	    return true;
+	}
+    }
+
+    const Playable *playable = model;
+    if (!playable || !playable->canPlay()) return false;
+
+    PlayParameters *parameters =
+	PlayParameterRepository::getInstance()->getPlayParameters(playable);
+
+    bool willPlay = !parameters->isPlayMuted();
+    
+    if (usesClipMixer(model)) {
+        ClipMixer *mixer = makeClipMixerFor(model);
+        if (mixer) {
+            QMutexLocker locker(&m_mutex);
+            m_clipMixerMap[model] = mixer;
+            return willPlay;
+        }
+    }
+
+    if (usesContinuousSynth(model)) {
+        ContinuousSynth *synth = makeSynthFor(model);
+        if (synth) {
+            QMutexLocker locker(&m_mutex);
+            m_continuousSynthMap[model] = synth;
+            return willPlay;
+        }
+    }
+
+    return false;
+}
+
+void
+AudioGenerator::playClipIdChanged(const Playable *playable, QString)
+{
+    const Model *model = dynamic_cast<const Model *>(playable);
+    if (!model) {
+        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
+                  << playable << " is not a supported model type"
+                  << endl;
+        return;
+    }
+
+    if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
+
+    ClipMixer *mixer = makeClipMixerFor(model);
+    if (mixer) {
+        QMutexLocker locker(&m_mutex);
+        m_clipMixerMap[model] = mixer;
+    }
+}
+
+bool
+AudioGenerator::usesClipMixer(const Model *model)
+{
+    bool clip = 
+        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
+         qobject_cast<const NoteModel *>(model) ||
+         qobject_cast<const FlexiNoteModel *>(model));
+    return clip;
+}
+
+bool
+AudioGenerator::wantsQuieterClips(const Model *model)
+{
+    // basically, anything that usually has sustain (like notes) or
+    // often has multiple sounds at once (like notes) wants to use a
+    // quieter level than simple click tracks
+    bool does = 
+        (qobject_cast<const NoteModel *>(model) ||
+         qobject_cast<const FlexiNoteModel *>(model));
+    return does;
+}
+
+bool
+AudioGenerator::usesContinuousSynth(const Model *model)
+{
+    bool cont = 
+        (qobject_cast<const SparseTimeValueModel *>(model));
+    return cont;
+}
+
+ClipMixer *
+AudioGenerator::makeClipMixerFor(const Model *model)
+{
+    QString clipId;
+
+    const Playable *playable = model;
+    if (!playable || !playable->canPlay()) return 0;
+
+    PlayParameters *parameters =
+	PlayParameterRepository::getInstance()->getPlayParameters(playable);
+    if (parameters) {
+        clipId = parameters->getPlayClipId();
+    }
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
+#endif
+
+    if (clipId == "") {
+        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
+        return 0;
+    }
+
+    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
+                                     m_sourceSampleRate,
+                                     m_processingBlockSize);
+
+    double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // clips are tuned to MIDI note 60 (middle C)
+
+    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
+
+    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
+    if (!mixer->loadClipData(clipPath, clipF0, level)) {
+        delete mixer;
+        return 0;
+    }
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
+#endif
+
+    return mixer;
+}
+
+ContinuousSynth *
+AudioGenerator::makeSynthFor(const Model *model)
+{
+    const Playable *playable = model;
+    if (!playable || !playable->canPlay()) return 0;
+
+    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
+                                                 m_sourceSampleRate,
+                                                 m_processingBlockSize,
+                                                 m_waveType);
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
+#endif
+
+    return synth;
+}
+
+void
+AudioGenerator::removeModel(Model *model)
+{
+    SparseOneDimensionalModel *sodm =
+	dynamic_cast<SparseOneDimensionalModel *>(model);
+    if (!sodm) return; // nothing to do
+
+    QMutexLocker locker(&m_mutex);
+
+    if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
+
+    ClipMixer *mixer = m_clipMixerMap[sodm];
+    m_clipMixerMap.erase(sodm);
+    delete mixer;
+}
+
+void
+AudioGenerator::clearModels()
+{
+    QMutexLocker locker(&m_mutex);
+
+    while (!m_clipMixerMap.empty()) {
+        ClipMixer *mixer = m_clipMixerMap.begin()->second;
+	m_clipMixerMap.erase(m_clipMixerMap.begin());
+	delete mixer;
+    }
+}    
+
+void
+AudioGenerator::reset()
+{
+    QMutexLocker locker(&m_mutex);
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    cerr << "AudioGenerator::reset()" << endl;
+#endif
+
+    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
+	if (i->second) {
+	    i->second->reset();
+	}
+    }
+
+    m_noteOffs.clear();
+}
+
+void
+AudioGenerator::setTargetChannelCount(int targetChannelCount)
+{
+    if (m_targetChannelCount == targetChannelCount) return;
+
+//    SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
+
+    QMutexLocker locker(&m_mutex);
+    m_targetChannelCount = targetChannelCount;
+
+    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
+	if (i->second) i->second->setChannelCount(targetChannelCount);
+    }
+}
+
+sv_frame_t
+AudioGenerator::getBlockSize() const
+{
+    return m_processingBlockSize;
+}
+
+void
+AudioGenerator::setSoloModelSet(std::set<Model *> s)
+{
+    QMutexLocker locker(&m_mutex);
+
+    m_soloModelSet = s;
+    m_soloing = true;
+}
+
+void
+AudioGenerator::clearSoloModelSet()
+{
+    QMutexLocker locker(&m_mutex);
+
+    m_soloModelSet.clear();
+    m_soloing = false;
+}
+
+sv_frame_t
+AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+			 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
+{
+    if (m_sourceSampleRate == 0) {
+	cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
+	return frameCount;
+    }
+
+    QMutexLocker locker(&m_mutex);
+
+    Playable *playable = model;
+    if (!playable || !playable->canPlay()) return frameCount;
+
+    PlayParameters *parameters =
+	PlayParameterRepository::getInstance()->getPlayParameters(playable);
+    if (!parameters) return frameCount;
+
+    bool playing = !parameters->isPlayMuted();
+    if (!playing) {
+#ifdef DEBUG_AUDIO_GENERATOR
+        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
+#endif
+        return frameCount;
+    }
+
+    if (m_soloing) {
+        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
+#ifdef DEBUG_AUDIO_GENERATOR
+            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
+#endif
+            return frameCount;
+        }
+    }
+
+    float gain = parameters->getPlayGain();
+    float pan = parameters->getPlayPan();
+
+    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
+    if (dtvm) {
+	return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
+				      buffer, gain, pan, fadeIn, fadeOut);
+    }
+
+    if (usesClipMixer(model)) {
+        return mixClipModel(model, startFrame, frameCount,
+                            buffer, gain, pan);
+    }
+
+    if (usesContinuousSynth(model)) {
+        return mixContinuousSynthModel(model, startFrame, frameCount,
+                                       buffer, gain, pan);
+    }
+
+    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
+
+    return frameCount;
+}
+
+sv_frame_t
+AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
+				       sv_frame_t startFrame, sv_frame_t frames,
+				       float **buffer, float gain, float pan,
+				       sv_frame_t fadeIn, sv_frame_t fadeOut)
+{
+    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
+
+    int modelChannels = dtvm->getChannelCount();
+
+    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
+
+        for (int c = 0; c < m_channelBufCount; ++c) {
+            delete[] m_channelBuffer[c];
+        }
+
+	delete[] m_channelBuffer;
+        m_channelBuffer = new float *[modelChannels];
+
+        for (int c = 0; c < modelChannels; ++c) {
+            m_channelBuffer[c] = new float[maxFrames];
+        }
+
+        m_channelBufCount = modelChannels;
+	m_channelBufSiz = maxFrames;
+    }
+
+    sv_frame_t got = 0;
+
+    if (startFrame >= fadeIn/2) {
+        got = dtvm->getMultiChannelData(0, modelChannels - 1,
+                                        startFrame - fadeIn/2,
+                                        frames + fadeOut/2 + fadeIn/2,
+                                        m_channelBuffer);
+    } else {
+        sv_frame_t missing = fadeIn/2 - startFrame;
+
+        for (int c = 0; c < modelChannels; ++c) {
+            m_channelBuffer[c] += missing;
+        }
+
+        if (missing > 0) {
+            cerr << "note: channelBufSiz = " << m_channelBufSiz
+                 << ", frames + fadeOut/2 = " << frames + fadeOut/2 
+                 << ", startFrame = " << startFrame 
+                 << ", missing = " << missing << endl;
+        }
+
+        got = dtvm->getMultiChannelData(0, modelChannels - 1,
+                                        startFrame,
+                                        frames + fadeOut/2,
+                                        m_channelBuffer);
+
+        for (int c = 0; c < modelChannels; ++c) {
+            m_channelBuffer[c] -= missing;
+        }
+
+        got += missing;
+    }	    
+
+    for (int c = 0; c < m_targetChannelCount; ++c) {
+
+	int sourceChannel = (c % modelChannels);
+
+//	SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
+
+	float channelGain = gain;
+	if (pan != 0.0) {
+	    if (c == 0) {
+		if (pan > 0.0) channelGain *= 1.0f - pan;
+	    } else {
+		if (pan < 0.0) channelGain *= pan + 1.0f;
+	    }
+	}
+
+	for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
+	    float *back = buffer[c];
+	    back -= fadeIn/2;
+	    back[i] +=
+                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
+                / float(fadeIn);
+	}
+
+	for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
+	    float mult = channelGain;
+	    if (i < fadeIn/2) {
+		mult = (mult * float(i)) / float(fadeIn);
+	    }
+	    if (i > frames - fadeOut/2) {
+		mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
+	    }
+            float val = m_channelBuffer[sourceChannel][i];
+            if (i >= got) val = 0.f;
+	    buffer[c][i] += mult * val;
+	}
+    }
+
+    return got;
+}
+  
+sv_frame_t
+AudioGenerator::mixClipModel(Model *model,
+                             sv_frame_t startFrame, sv_frame_t frames,
+                             float **buffer, float gain, float pan)
+{
+    ClipMixer *clipMixer = m_clipMixerMap[model];
+    if (!clipMixer) return 0;
+
+    int blocks = int(frames / m_processingBlockSize);
+    
+    //!!! todo: the below -- it matters
+
+    //!!! hang on -- the fact that the audio callback play source's
+    //buffer is a multiple of the plugin's buffer size doesn't mean
+    //that we always get called for a multiple of it here (because it
+    //also depends on the JACK block size).  how should we ensure that
+    //all models write the same amount in to the mix, and that we
+    //always have a multiple of the plugin buffer size?  I guess this
+    //class has to be queryable for the plugin buffer size & the
+    //callback play source has to use that as a multiple for all the
+    //calls to mixModel
+
+    sv_frame_t got = blocks * m_processingBlockSize;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
+         << ", blocks " << blocks << ", have " << m_noteOffs.size()
+         << " note-offs" << endl;
+#endif
+
+    ClipMixer::NoteStart on;
+    ClipMixer::NoteEnd off;
+
+    NoteOffSet &noteOffs = m_noteOffs[model];
+
+    float **bufferIndexes = new float *[m_targetChannelCount];
+
+    for (int i = 0; i < blocks; ++i) {
+
+	sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
+
+        NoteList notes;
+        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
+        if (exportable) {
+            notes = exportable->getNotesWithin(reqStart,
+                                               reqStart + m_processingBlockSize);
+        }
+
+        std::vector<ClipMixer::NoteStart> starts;
+        std::vector<ClipMixer::NoteEnd> ends;
+
+	for (NoteList::const_iterator ni = notes.begin();
+             ni != notes.end(); ++ni) {
+
+	    sv_frame_t noteFrame = ni->start;
+
+	    if (noteFrame < reqStart ||
+		noteFrame >= reqStart + m_processingBlockSize) continue;
+
+	    while (noteOffs.begin() != noteOffs.end() &&
+		   noteOffs.begin()->frame <= noteFrame) {
+
+                sv_frame_t eventFrame = noteOffs.begin()->frame;
+                if (eventFrame < reqStart) eventFrame = reqStart;
+
+                off.frameOffset = eventFrame - reqStart;
+                off.frequency = noteOffs.begin()->frequency;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+		cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
+#endif
+
+                ends.push_back(off);
+		noteOffs.erase(noteOffs.begin());
+	    }
+
+            on.frameOffset = noteFrame - reqStart;
+            on.frequency = ni->getFrequency();
+            on.level = float(ni->velocity) / 127.0f;
+            on.pan = pan;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+	    cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
+#endif
+	    
+            starts.push_back(on);
+	    noteOffs.insert
+                (NoteOff(on.frequency, noteFrame + ni->duration));
+	}
+
+	while (noteOffs.begin() != noteOffs.end() &&
+	       noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
+
+            sv_frame_t eventFrame = noteOffs.begin()->frame;
+            if (eventFrame < reqStart) eventFrame = reqStart;
+
+            off.frameOffset = eventFrame - reqStart;
+            off.frequency = noteOffs.begin()->frequency;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
+#endif
+
+            ends.push_back(off);
+            noteOffs.erase(noteOffs.begin());
+	}
+
+	for (int c = 0; c < m_targetChannelCount; ++c) {
+            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
+        }
+
+        clipMixer->mix(bufferIndexes, gain, starts, ends);
+    }
+
+    delete[] bufferIndexes;
+
+    return got;
+}
+
+sv_frame_t
+AudioGenerator::mixContinuousSynthModel(Model *model,
+                                        sv_frame_t startFrame,
+                                        sv_frame_t frames,
+                                        float **buffer,
+                                        float gain, 
+                                        float pan)
+{
+    ContinuousSynth *synth = m_continuousSynthMap[model];
+    if (!synth) return 0;
+
+    // only type we support here at the moment
+    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
+    if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
+
+    int blocks = int(frames / m_processingBlockSize);
+
+    //!!! todo: see comment in mixClipModel
+
+    sv_frame_t got = blocks * m_processingBlockSize;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    cout << "mixModel [synth]: frames " << frames
+	      << ", blocks " << blocks << endl;
+#endif
+    
+    float **bufferIndexes = new float *[m_targetChannelCount];
+
+    for (int i = 0; i < blocks; ++i) {
+
+	sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
+
+	for (int c = 0; c < m_targetChannelCount; ++c) {
+            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
+        }
+
+        SparseTimeValueModel::PointList points = 
+            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
+
+        // by default, repeat last frequency
+        float f0 = 0.f;
+
+        // go straight to the last freq that is genuinely in this range
+        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
+             itr != points.begin(); ) {
+            --itr;
+            if (itr->frame >= reqStart &&
+                itr->frame < reqStart + m_processingBlockSize) {
+                f0 = itr->value;
+                break;
+            }
+        }
+
+        // if we found no such frequency and the next point is further
+        // away than twice the model resolution, go silent (same
+        // criterion TimeValueLayer uses for ending a discrete curve
+        // segment)
+        if (f0 == 0.f) {
+            SparseTimeValueModel::PointList nextPoints = 
+                stvm->getNextPoints(reqStart + m_processingBlockSize);
+            if (nextPoints.empty() ||
+                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
+                f0 = -1.f;
+            }
+        }
+
+//        cerr << "f0 = " << f0 << endl;
+
+        synth->mix(bufferIndexes,
+                   gain,
+                   pan,
+                   f0);
+    }
+
+    delete[] bufferIndexes;
+
+    return got;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/AudioGenerator.h	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,168 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_GENERATOR_H_
+#define _AUDIO_GENERATOR_H_
+
+class Model;
+class NoteModel;
+class FlexiNoteModel;
+class DenseTimeValueModel;
+class SparseOneDimensionalModel;
+class Playable;
+class ClipMixer;
+class ContinuousSynth;
+
+#include <QObject>
+#include <QMutex>
+
+#include <set>
+#include <map>
+#include <vector>
+
+#include "base/BaseTypes.h"
+
+class AudioGenerator : public QObject
+{
+    Q_OBJECT
+
+public:
+    AudioGenerator();
+    virtual ~AudioGenerator();
+
+    /**
+     * Add a data model to be played from and initialise any necessary
+     * audio generation code.  Returns true if the model will be
+     * played.  The model will be added regardless of the return
+     * value.
+     */
+    virtual bool addModel(Model *model);
+
+    /**
+     * Remove a model.
+     */
+    virtual void removeModel(Model *model);
+
+    /**
+     * Remove all models.
+     */
+    virtual void clearModels();
+
+    /**
+     * Reset playback, clearing buffers and the like.
+     */
+    virtual void reset();
+
+    /**
+     * Set the target channel count.  The buffer parameter to mixModel
+     * must always point to at least this number of arrays.
+     */
+    virtual void setTargetChannelCount(int channelCount);
+
+    /**
+     * Return the internal processing block size.  The frameCount
+     * argument to all mixModel calls must be a multiple of this
+     * value.
+     */
+    virtual sv_frame_t getBlockSize() const;
+
+    /**
+     * Mix a single model into an output buffer.
+     */
+    virtual sv_frame_t mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+			    float **buffer, sv_frame_t fadeIn = 0, sv_frame_t fadeOut = 0);
+
+    /**
+     * Specify that only the given set of models should be played.
+     */
+    virtual void setSoloModelSet(std::set<Model *>s);
+
+    /**
+     * Specify that all models should be played as normal (if not
+     * muted).
+     */
+    virtual void clearSoloModelSet();
+
+protected slots:
+    void playClipIdChanged(const Playable *, QString);
+
+protected:
+    sv_samplerate_t m_sourceSampleRate;
+    int m_targetChannelCount;
+    int m_waveType;
+
+    bool m_soloing;
+    std::set<Model *> m_soloModelSet;
+
+    struct NoteOff {
+
+        NoteOff(float _freq, sv_frame_t _frame) : frequency(_freq), frame(_frame) { }
+
+        float frequency;
+	sv_frame_t frame;
+
+	struct Comparator {
+	    bool operator()(const NoteOff &n1, const NoteOff &n2) const {
+		return n1.frame < n2.frame;
+	    }
+	};
+    };
+
+
+    typedef std::map<const Model *, ClipMixer *> ClipMixerMap;
+
+    typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet;
+    typedef std::map<const Model *, NoteOffSet> NoteOffMap;
+
+    typedef std::map<const Model *, ContinuousSynth *> ContinuousSynthMap;
+
+    QMutex m_mutex;
+
+    ClipMixerMap m_clipMixerMap;
+    NoteOffMap m_noteOffs;
+    static QString m_sampleDir;
+
+    ContinuousSynthMap m_continuousSynthMap;
+
+    bool usesClipMixer(const Model *);
+    bool wantsQuieterClips(const Model *);
+    bool usesContinuousSynth(const Model *);
+
+    ClipMixer *makeClipMixerFor(const Model *model);
+    ContinuousSynth *makeSynthFor(const Model *model);
+
+    static void initialiseSampleDir();
+
+    virtual sv_frame_t mixDenseTimeValueModel
+    (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount,
+     float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut);
+
+    virtual sv_frame_t mixClipModel
+    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+     float **buffer, float gain, float pan);
+
+    virtual sv_frame_t mixContinuousSynthModel
+    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+     float **buffer, float gain, float pan);
+    
+    static const sv_frame_t m_processingBlockSize;
+
+    float **m_channelBuffer;
+    sv_frame_t m_channelBufSiz;
+    int m_channelBufCount;
+};
+
+#endif
+
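
The mixModel() contract described above is easiest to see in a sketch (hypothetical caller code, not from this changeset): the caller supplies one zero-initialised buffer per target channel, keeps the frame count a multiple of getBlockSize(), and the generator adds its output into those buffers.  The helper name mixOneBlock is illustrative only.

    #include "AudioGenerator.h"
    #include <vector>

    // Hypothetical helper mixing a single block from one model.
    sv_frame_t mixOneBlock(AudioGenerator *generator, Model *model,
                           sv_frame_t startFrame, int channels)
    {
        sv_frame_t blockSize = generator->getBlockSize();

        // One zeroed buffer per channel; mixModel() adds into them.
        std::vector<std::vector<float>> data
            (channels, std::vector<float>(blockSize, 0.f));
        std::vector<float *> buffers(channels);
        for (int c = 0; c < channels; ++c) buffers[c] = data[c].data();

        generator->setTargetChannelCount(channels);
        return generator->mixModel(model, startFrame, blockSize, buffers.data());
    }
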
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/ClipMixer.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,248 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "ClipMixer.h"
+
+#include <sndfile.h>
+#include <cmath>
+
+#include "base/Debug.h"
+
+//#define DEBUG_CLIP_MIXER 1
+
+ClipMixer::ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize) :
+    m_channels(channels),
+    m_sampleRate(sampleRate),
+    m_blockSize(blockSize),
+    m_clipData(0),
+    m_clipLength(0),
+    m_clipF0(0),
+    m_clipRate(0)
+{
+}
+
+ClipMixer::~ClipMixer()
+{
+    if (m_clipData) free(m_clipData);
+}
+
+void
+ClipMixer::setChannelCount(int channels)
+{
+    m_channels = channels;
+}
+
+bool
+ClipMixer::loadClipData(QString path, double f0, double level)
+{
+    if (m_clipData) {
+        cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl;
+        return false;
+    }
+
+    SF_INFO info;
+    SNDFILE *file;
+    float *tmpFrames;
+    sv_frame_t i;
+
+    info.format = 0;
+    file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info);
+    if (!file) {
+	cerr << "ClipMixer::loadClipData: Failed to open file path \""
+             << path << "\": " << sf_strerror(file) << endl;
+	return false;
+    }
+
+    tmpFrames = (float *)malloc(info.frames * info.channels * sizeof(float));
+    if (!tmpFrames) {
+        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * info.channels * sizeof(float) << ") failed" << endl;
+        sf_close(file);
+        return false;
+    }
+
+    sf_readf_float(file, tmpFrames, info.frames);
+    sf_close(file);
+
+    m_clipData = (float *)malloc(info.frames * sizeof(float));
+    if (!m_clipData) {
+        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * sizeof(float) << ") failed" << endl;
+	free(tmpFrames);
+	return false;
+    }
+
+    for (i = 0; i < info.frames; ++i) {
+	int j;
+	m_clipData[i] = 0.0f;
+	for (j = 0; j < info.channels; ++j) {
+	    m_clipData[i] += tmpFrames[i * info.channels + j] * float(level);
+	}
+    }
+
+    free(tmpFrames);
+
+    m_clipLength = info.frames;
+    m_clipF0 = f0;
+    m_clipRate = info.samplerate;
+
+    return true;
+}
+
+void
+ClipMixer::reset()
+{
+    m_playing.clear();
+}
+
+double
+ClipMixer::getResampleRatioFor(double frequency)
+{
+    if (!m_clipData || !m_clipRate) return 1.0;
+    double pitchRatio = m_clipF0 / frequency;
+    double resampleRatio = m_sampleRate / m_clipRate;
+    return pitchRatio * resampleRatio;
+}
+
+sv_frame_t
+ClipMixer::getResampledClipDuration(double frequency)
+{
+    return sv_frame_t(ceil(double(m_clipLength) * getResampleRatioFor(frequency)));
+}
+
+void
+ClipMixer::mix(float **toBuffers, 
+               float gain,
+               std::vector<NoteStart> newNotes, 
+               std::vector<NoteEnd> endingNotes)
+{
+    foreach (NoteStart note, newNotes) {
+        if (note.frequency > 20 && 
+            note.frequency < 5000) {
+            m_playing.push_back(note);
+        }
+    }
+
+    std::vector<NoteStart> remaining;
+
+    float *levels = new float[m_channels];
+
+#ifdef DEBUG_CLIP_MIXER
+    cerr << "ClipMixer::mix: have " << m_playing.size() << " playing note(s)"
+         << " and " << endingNotes.size() << " note(s) ending here"
+         << endl;
+#endif
+
+    foreach (NoteStart note, m_playing) {
+
+        for (int c = 0; c < m_channels; ++c) {
+            levels[c] = note.level * gain;
+        }
+        if (note.pan != 0.0 && m_channels == 2) {
+            levels[0] *= 1.0f - note.pan;
+            levels[1] *= note.pan + 1.0f;
+        }
+
+        sv_frame_t start = note.frameOffset;
+        sv_frame_t durationHere = m_blockSize;
+        if (start > 0) durationHere = m_blockSize - start;
+
+        bool ending = false;
+
+        foreach (NoteEnd end, endingNotes) {
+            if (end.frequency == note.frequency && 
+                end.frameOffset >= start &&
+                end.frameOffset <= m_blockSize) {
+                ending = true;
+                durationHere = end.frameOffset;
+                if (start > 0) durationHere = end.frameOffset - start;
+                break;
+            }
+        }
+
+        sv_frame_t clipDuration = getResampledClipDuration(note.frequency);
+        if (start + clipDuration > 0) {
+            if (start < 0 && start + clipDuration < durationHere) {
+                durationHere = start + clipDuration;
+            }
+            if (durationHere > 0) {
+                mixNote(toBuffers,
+                        levels,
+                        note.frequency,
+                        start < 0 ? -start : 0,
+                        start > 0 ?  start : 0,
+                        durationHere,
+                        ending);
+            }
+        }
+
+        if (!ending) {
+            NoteStart adjusted = note;
+            adjusted.frameOffset -= m_blockSize;
+            remaining.push_back(adjusted);
+        }
+    }
+
+    delete[] levels;
+
+    m_playing = remaining;
+}
+
+void
+ClipMixer::mixNote(float **toBuffers,
+                   float *levels,
+                   float frequency,
+                   sv_frame_t sourceOffset,
+                   sv_frame_t targetOffset,
+                   sv_frame_t sampleCount,
+                   bool isEnd)
+{
+    if (!m_clipData) return;
+
+    double ratio = getResampleRatioFor(frequency);
+    
+    double releaseTime = 0.01;
+    sv_frame_t releaseSampleCount = sv_frame_t(round(releaseTime * m_sampleRate));
+    if (releaseSampleCount > sampleCount) {
+        releaseSampleCount = sampleCount;
+    }
+    double releaseFraction = 1.0/double(releaseSampleCount);
+
+    for (sv_frame_t i = 0; i < sampleCount; ++i) {
+
+        sv_frame_t s = sourceOffset + i;
+
+        double os = double(s) / ratio;
+        sv_frame_t osi = sv_frame_t(floor(os));
+
+        //!!! just linear interpolation for now (same as SV's sample
+        //!!! player). a small sinc kernel would be better and
+        //!!! probably "good enough"
+        double value = 0.0;
+        if (osi < m_clipLength) {
+            value += m_clipData[osi];
+        }
+        if (osi + 1 < m_clipLength) {
+            value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - double(osi));
+        }
+         
+        if (isEnd && i + releaseSampleCount > sampleCount) {
+            value *= releaseFraction * double(sampleCount - i); // linear ramp for release
+        }
+
+        for (int c = 0; c < m_channels; ++c) {
+            toBuffers[c][targetOffset + i] += float(levels[c] * value);
+        }
+    }
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/ClipMixer.h	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,94 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef CLIP_MIXER_H
+#define CLIP_MIXER_H
+
+#include <QString>
+#include <vector>
+
+#include "base/BaseTypes.h"
+
+/**
+ * Mix in synthetic notes produced by resampling a prerecorded
+ * clip. (i.e. this is an implementation of a digital sampler in the
+ * musician's sense.) This can mix any number of notes of arbitrary
+ * frequency, so long as they all use the same sample clip.
+ */
+
+class ClipMixer
+{
+public:
+    ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize);
+    ~ClipMixer();
+
+    void setChannelCount(int channels);
+
+    /**
+     * Load a sample clip from a wav file. This can only happen once:
+     * construct a new ClipMixer if you want a different clip. The
+     * clip was recorded at a pitch with fundamental frequency clipF0,
+     * and should be scaled by level (in the range 0-1) when playing
+     * back.
+     */
+    bool loadClipData(QString clipFilePath, double clipF0, double level);
+
+    void reset(); // discarding any playing notes
+
+    struct NoteStart {
+	sv_frame_t frameOffset; // within current processing block
+	float frequency; // Hz
+	float level; // volume in range (0,1]
+	float pan; // range [-1,1]
+    };
+
+    struct NoteEnd {
+	sv_frame_t frameOffset; // in current processing block
+        float frequency; // matching note start
+    };
+
+    void mix(float **toBuffers, 
+             float gain,
+	     std::vector<NoteStart> newNotes, 
+	     std::vector<NoteEnd> endingNotes);
+
+private:
+    int m_channels;
+    sv_samplerate_t m_sampleRate;
+    sv_frame_t m_blockSize;
+
+    QString m_clipPath;
+
+    float *m_clipData;
+    sv_frame_t m_clipLength;
+    double m_clipF0;
+    sv_samplerate_t m_clipRate;
+
+    std::vector<NoteStart> m_playing;
+
+    double getResampleRatioFor(double frequency);
+    sv_frame_t getResampledClipDuration(double frequency);
+
+    void mixNote(float **toBuffers, 
+                 float *levels,
+                 float frequency,
+                 sv_frame_t sourceOffset, // within resampled note
+                 sv_frame_t targetOffset, // within target buffer
+                 sv_frame_t sampleCount,
+                 bool isEnd);
+};
+
+
+#endif
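
A small usage sketch of the per-block interface above (hypothetical code: the clip path, tuning and note frequencies are placeholders, and a real caller would move on to the next block's output buffers between the two mix() calls).  The clip is loaded once; each processing block then passes in the notes starting and ending within it.

    #include "ClipMixer.h"

    // Hypothetical example: start an A4 in one block, end it in the next.
    void sketchClipMixer(float **channelBuffers, int channels,
                         sv_samplerate_t rate, sv_frame_t blockSize)
    {
        ClipMixer mixer(channels, rate, blockSize);
        if (!mixer.loadClipData("click.wav", 261.63, 1.0)) return;

        ClipMixer::NoteStart on;
        on.frameOffset = 0;     // note begins at the start of this block
        on.frequency = 440.f;   // A4
        on.level = 0.8f;
        on.pan = 0.f;
        mixer.mix(channelBuffers, 1.f, { on }, { });

        ClipMixer::NoteEnd off;
        off.frameOffset = 0;    // ...and ends at the start of the next block
        off.frequency = 440.f;  // must match the note start's frequency
        mixer.mix(channelBuffers, 1.f, { }, { off });
    }
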
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/ContinuousSynth.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,149 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "ContinuousSynth.h"
+
+#include "base/Debug.h"
+#include "system/System.h"
+
+#include <cmath>
+
+ContinuousSynth::ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType) :
+    m_channels(channels),
+    m_sampleRate(sampleRate),
+    m_blockSize(blockSize),
+    m_prevF0(-1.0),
+    m_phase(0.0),
+    m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square
+{
+}
+
+ContinuousSynth::~ContinuousSynth()
+{
+}
+
+void
+ContinuousSynth::reset()
+{
+    m_phase = 0;
+}
+
+void
+ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0f)
+{
+    double f0(f0f);
+    if (f0 == 0.0) f0 = m_prevF0;
+
+    bool wasOn = (m_prevF0 > 0.0);
+    bool nowOn = (f0 > 0.0);
+
+    if (!nowOn && !wasOn) {
+        m_phase = 0;
+        return;
+    }
+
+    sv_frame_t fadeLength = 100;
+
+    float *levels = new float[m_channels];
+    
+    for (int c = 0; c < m_channels; ++c) {
+        levels[c] = gain * 0.5f; // scale gain otherwise too loud compared to source
+    }
+    if (pan != 0.0 && m_channels == 2) {
+        levels[0] *= 1.0f - pan;
+        levels[1] *= pan + 1.0f;
+    }
+
+//    cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl;
+
+    for (sv_frame_t i = 0; i < m_blockSize; ++i) {
+
+        double fHere = (nowOn ? f0 : m_prevF0);
+
+        if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) {
+            // interpolate the frequency shift
+            fHere = m_prevF0 + ((f0 - m_prevF0) * double(i)) / double(fadeLength);
+        }
+
+        double phasor = (fHere * 2 * M_PI) / m_sampleRate;
+    
+        m_phase = m_phase + phasor;
+
+        int harmonics = int((m_sampleRate / 4) / fHere - 1);
+        if (harmonics < 1) harmonics = 1;
+
+        switch (m_wavetype) {
+        case 1:
+            harmonics = 1;
+            break;
+        case 2:
+            break;
+        case 3:
+            break;
+        default:
+            harmonics = 3;
+            break;
+        }
+
+        for (int h = 0; h < harmonics; ++h) {
+
+            double v = 0;
+            double hn = 0;
+            double hp = 0;
+
+            switch (m_wavetype) {
+            case 1: // single sinusoid
+                v = sin(m_phase);
+                break;
+            case 2: // sawtooth
+                if (h != 0) {
+                    hn = h + 1;
+                    hp = m_phase * hn;
+                    v = -(1.0 / M_PI) * sin(hp) / hn;
+                } else {
+                    v = 0.5;
+                }
+                break;
+            case 3: // square
+                hn = h*2 + 1;
+                hp = m_phase * hn;
+                v = sin(hp) / hn;
+                break;
+            default: // 3 sinusoids
+                hn = h + 1;
+                hp = m_phase * hn;
+                v = sin(hp) / hn;
+                break;
+            }
+
+            if (!wasOn && i < fadeLength) {
+                // fade in
+                v = v * (double(i) / double(fadeLength));
+            } else if (!nowOn) {
+                // fade out
+                if (i > fadeLength) v = 0;
+                else v = v * (1.0 - (double(i) / double(fadeLength)));
+            }
+
+            for (int c = 0; c < m_channels; ++c) {
+                toBuffers[c][i] += float(levels[c] * v);
+            }
+        }
+    }    
+
+    m_prevF0 = f0;
+
+    delete[] levels;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/ContinuousSynth.h	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,65 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef CONTINUOUS_SYNTH_H
+#define CONTINUOUS_SYNTH_H
+
+#include "base/BaseTypes.h"
+
+/**
+ * Mix into a target buffer a signal synthesised so as to sound at a
+ * specific frequency. The frequency may change with each processing
+ * block, or may be switched on or off.
+ */
+
+class ContinuousSynth
+{
+public:
+    ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType);
+    ~ContinuousSynth();
+    
+    void setChannelCount(int channels);
+
+    void reset();
+
+    /**
+     * Mix in a signal to be heard at the given fundamental
+     * frequency. Any oscillator state will be maintained between
+     * process calls so as to provide a continuous sound. The f0 value
+     * may vary between calls.
+     *
+     * Supply f0 equal to 0 if you want to maintain the f0 from the
+     * previous block (without having to remember what it was).
+     *
+     * Supply f0 less than 0 for silence. You should continue to call
+     * this even when the signal is silent if you want to ensure the
+     * sound switches on and off cleanly.
+     */
+    void mix(float **toBuffers,
+             float gain,
+             float pan,
+             float f0);
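+
+    // A minimal usage sketch (illustrative, with assumed caller-side names):
+    // call once per processing block, e.g.
+    //     synth.mix(buffers, gain, pan, 440.f);  // sound at 440 Hz
+    //     synth.mix(buffers, gain, pan, 0.f);    // keep the previous f0
+    //     synth.mix(buffers, gain, pan, -1.f);   // fade towards silence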
+
+private:
+    int m_channels;
+    sv_samplerate_t m_sampleRate;
+    sv_frame_t m_blockSize;
+
+    double m_prevF0;
+    double m_phase;
+
+    int m_wavetype;
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/PlaySpeedRangeMapper.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,101 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "PlaySpeedRangeMapper.h"
+
+#include <iostream>
+#include <cmath>
+
+// PlaySpeedRangeMapper maps a position in the range [0,120] on to a
+// play speed factor on a logarithmic scale in the range 0.125 ->
+// 8. This ensures that the desirable speed factors 0.25, 0.5, 1, 2,
+// and 4 are all mapped to exact positions (respectively 20, 40, 60,
+// 80, 100).
+
+// Note that the "factor" referred to below is a play speed factor
+// (higher = faster, 1.0 = normal speed), the "value" is a percentage
+// (higher = faster, 100 = normal speed), and the "position" is an
+// integer step on the dial's scale (0-120, 60 = centre).
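+//
+// Worked through the formulas below, for example:
+//   position  20 -> factor 2^(20 * 0.05 - 3) = 0.25 -> value  25%
+//   position  60 -> factor 2^(60 * 0.05 - 3) = 1.0  -> value 100%
+//   position  80 -> factor 2^(80 * 0.05 - 3) = 2.0  -> value 200%
+//   position 100 -> factor 2^(100 * 0.05 - 3) = 4.0 -> value 400%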
+
+PlaySpeedRangeMapper::PlaySpeedRangeMapper() :
+    m_minpos(0),
+    m_maxpos(120)
+{
+}
+
+int
+PlaySpeedRangeMapper::getPositionForValue(double value) const
+{
+    // value is percent
+    double factor = getFactorForValue(value);
+    int position = getPositionForFactor(factor);
+    return position;
+}
+
+int
+PlaySpeedRangeMapper::getPositionForValueUnclamped(double value) const
+{
+    // We don't really provide this
+    return getPositionForValue(value);
+}
+
+double
+PlaySpeedRangeMapper::getValueForPosition(int position) const
+{
+    double factor = getFactorForPosition(position);
+    double pc = getValueForFactor(factor);
+    return pc;
+}
+
+double
+PlaySpeedRangeMapper::getValueForPositionUnclamped(int position) const
+{
+    // We don't really provide this
+    return getValueForPosition(position);
+}
+
+double
+PlaySpeedRangeMapper::getValueForFactor(double factor) const
+{
+    return factor * 100.0;
+}
+
+double
+PlaySpeedRangeMapper::getFactorForValue(double value) const
+{
+    return value / 100.0;
+}
+
+int
+PlaySpeedRangeMapper::getPositionForFactor(double factor) const
+{
+    if (factor == 0) return m_minpos;
+    int pos = int(lrint((log2(factor) + 3.0) * 20.0));
+    if (pos < m_minpos) pos = m_minpos;
+    if (pos > m_maxpos) pos = m_maxpos;
+    return pos;
+}
+
+double
+PlaySpeedRangeMapper::getFactorForPosition(int position) const
+{
+    return pow(2.0, double(position) * 0.05 - 3.0);
+}
+
+QString
+PlaySpeedRangeMapper::getUnit() const
+{
+    return "%";
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audio/PlaySpeedRangeMapper.h	Tue Aug 04 13:27:42 2015 +0100
@@ -0,0 +1,49 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _PLAY_SPEED_RANGE_MAPPER_H_
+#define _PLAY_SPEED_RANGE_MAPPER_H_
+
+#include "base/RangeMapper.h"
+
+class PlaySpeedRangeMapper : public RangeMapper
+{
+public:
+    PlaySpeedRangeMapper();
+
+    int getMinPosition() const { return m_minpos; }
+    int getMaxPosition() const { return m_maxpos; }
+    
+    virtual int getPositionForValue(double value) const;
+    virtual int getPositionForValueUnclamped(double value) const;
+
+    virtual double getValueForPosition(int position) const;
+    virtual double getValueForPositionUnclamped(int position) const;
+
+    int getPositionForFactor(double factor) const;
+    double getValueForFactor(double factor) const;
+
+    double getFactorForPosition(int position) const;
+    double getFactorForValue(double value) const;
+
+    virtual QString getUnit() const;
+    
+protected:
+    int m_minpos;
+    int m_maxpos;
+};
+
+
+#endif
--- a/audioio/AudioCallbackPlaySource.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1897 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam and QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "AudioCallbackPlaySource.h"
-
-#include "AudioGenerator.h"
-
-#include "data/model/Model.h"
-#include "base/ViewManagerBase.h"
-#include "base/PlayParameterRepository.h"
-#include "base/Preferences.h"
-#include "data/model/DenseTimeValueModel.h"
-#include "data/model/WaveFileModel.h"
-#include "data/model/SparseOneDimensionalModel.h"
-#include "plugin/RealTimePluginInstance.h"
-
-#include "AudioCallbackPlayTarget.h"
-
-#include <rubberband/RubberBandStretcher.h>
-using namespace RubberBand;
-
-#include <iostream>
-#include <cassert>
-
-//#define DEBUG_AUDIO_PLAY_SOURCE 1
-//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1
-
-static const int DEFAULT_RING_BUFFER_SIZE = 131071;
-
-AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager,
-                                                 QString clientName) :
-    m_viewManager(manager),
-    m_audioGenerator(new AudioGenerator()),
-    m_clientName(clientName),
-    m_readBuffers(0),
-    m_writeBuffers(0),
-    m_readBufferFill(0),
-    m_writeBufferFill(0),
-    m_bufferScavenger(1),
-    m_sourceChannelCount(0),
-    m_blockSize(1024),
-    m_sourceSampleRate(0),
-    m_targetSampleRate(0),
-    m_playLatency(0),
-    m_target(0),
-    m_lastRetrievalTimestamp(0.0),
-    m_lastRetrievedBlockSize(0),
-    m_trustworthyTimestamps(true),
-    m_lastCurrentFrame(0),
-    m_playing(false),
-    m_exiting(false),
-    m_lastModelEndFrame(0),
-    m_ringBufferSize(DEFAULT_RING_BUFFER_SIZE),
-    m_outputLeft(0.0),
-    m_outputRight(0.0),
-    m_auditioningPlugin(0),
-    m_auditioningPluginBypassed(false),
-    m_playStartFrame(0),
-    m_playStartFramePassed(false),
-    m_timeStretcher(0),
-    m_monoStretcher(0),
-    m_stretchRatio(1.0),
-    m_stretchMono(false),
-    m_stretcherInputCount(0),
-    m_stretcherInputs(0),
-    m_stretcherInputSizes(0),
-    m_fillThread(0),
-    m_converter(0),
-    m_crapConverter(0),
-    m_resampleQuality(Preferences::getInstance()->getResampleQuality())
-{
-    m_viewManager->setAudioPlaySource(this);
-
-    connect(m_viewManager, SIGNAL(selectionChanged()),
-	    this, SLOT(selectionChanged()));
-    connect(m_viewManager, SIGNAL(playLoopModeChanged()),
-	    this, SLOT(playLoopModeChanged()));
-    connect(m_viewManager, SIGNAL(playSelectionModeChanged()),
-	    this, SLOT(playSelectionModeChanged()));
-
-    connect(this, SIGNAL(playStatusChanged(bool)),
-            m_viewManager, SLOT(playStatusChanged(bool)));
-
-    connect(PlayParameterRepository::getInstance(),
-	    SIGNAL(playParametersChanged(PlayParameters *)),
-	    this, SLOT(playParametersChanged(PlayParameters *)));
-
-    connect(Preferences::getInstance(),
-            SIGNAL(propertyChanged(PropertyContainer::PropertyName)),
-            this, SLOT(preferenceChanged(PropertyContainer::PropertyName)));
-}
-
-AudioCallbackPlaySource::~AudioCallbackPlaySource()
-{
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource entering" << endl;
-#endif
-    m_exiting = true;
-
-    if (m_fillThread) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource dtor: awakening thread" << endl;
-#endif
-        m_condition.wakeAll();
-	m_fillThread->wait();
-	delete m_fillThread;
-    }
-
-    clearModels();
-    
-    if (m_readBuffers != m_writeBuffers) {
-	delete m_readBuffers;
-    }
-
-    delete m_writeBuffers;
-
-    delete m_audioGenerator;
-
-    for (int i = 0; i < m_stretcherInputCount; ++i) {
-        delete[] m_stretcherInputs[i];
-    }
-    delete[] m_stretcherInputSizes;
-    delete[] m_stretcherInputs;
-
-    delete m_timeStretcher;
-    delete m_monoStretcher;
-
-    m_bufferScavenger.scavenge(true);
-    m_pluginScavenger.scavenge(true);
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource finishing" << endl;
-#endif
-}
-
-void
-AudioCallbackPlaySource::addModel(Model *model)
-{
-    if (m_models.find(model) != m_models.end()) return;
-
-    bool willPlay = m_audioGenerator->addModel(model);
-
-    m_mutex.lock();
-
-    m_models.insert(model);
-    if (model->getEndFrame() > m_lastModelEndFrame) {
-	m_lastModelEndFrame = model->getEndFrame();
-    }
-
-    bool buffersChanged = false, srChanged = false;
-
-    int modelChannels = 1;
-    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
-    if (dtvm) modelChannels = dtvm->getChannelCount();
-    if (modelChannels > m_sourceChannelCount) {
-	m_sourceChannelCount = modelChannels;
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl;
-#endif
-
-    if (m_sourceSampleRate == 0) {
-
-	m_sourceSampleRate = model->getSampleRate();
-	srChanged = true;
-
-    } else if (model->getSampleRate() != m_sourceSampleRate) {
-
-        // If this is a dense time-value model and we have no other, we
-        // can just switch to this model's sample rate
-
-        if (dtvm) {
-
-            bool conflicting = false;
-
-            for (std::set<Model *>::const_iterator i = m_models.begin();
-                 i != m_models.end(); ++i) {
-                // Only wave file models can be considered conflicting --
-                // writable wave file models are derived and we shouldn't
-                // take their rates into account.  Also, don't give any
-                // particular weight to a file that's already playing at
-                // the wrong rate anyway
-                WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i);
-                if (wfm && wfm != dtvm &&
-                    wfm->getSampleRate() != model->getSampleRate() &&
-                    wfm->getSampleRate() == m_sourceSampleRate) {
-                    SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl;
-                    conflicting = true;
-                    break;
-                }
-            }
-
-            if (conflicting) {
-
-                SVDEBUG << "AudioCallbackPlaySource::addModel: ERROR: "
-                          << "New model sample rate does not match" << endl
-                          << "existing model(s) (new " << model->getSampleRate()
-                          << " vs " << m_sourceSampleRate
-                          << "), playback will be wrong"
-                          << endl;
-                
-                emit sampleRateMismatch(model->getSampleRate(),
-                                        m_sourceSampleRate,
-                                        false);
-            } else {
-                m_sourceSampleRate = model->getSampleRate();
-                srChanged = true;
-            }
-        }
-    }
-
-    if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) {
-	clearRingBuffers(true, getTargetChannelCount());
-	buffersChanged = true;
-    } else {
-	if (willPlay) clearRingBuffers(true);
-    }
-
-    if (buffersChanged || srChanged) {
-	if (m_converter) {
-	    src_delete(m_converter);
-            src_delete(m_crapConverter);
-	    m_converter = 0;
-            m_crapConverter = 0;
-	}
-    }
-
-    rebuildRangeLists();
-
-    m_mutex.unlock();
-
-    m_audioGenerator->setTargetChannelCount(getTargetChannelCount());
-
-    if (!m_fillThread) {
-	m_fillThread = new FillThread(*this);
-	m_fillThread->start();
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << endl;
-#endif
-
-    if (buffersChanged || srChanged) {
-	emit modelReplaced();
-    }
-
-    connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
-            this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl;
-#endif
-
-    m_condition.wakeAll();
-}
-
-void
-AudioCallbackPlaySource::modelChangedWithin(sv_frame_t 
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-                                            startFrame
-#endif
-                                            , sv_frame_t endFrame)
-{
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::modelChangedWithin(" << startFrame << "," << endFrame << ")" << endl;
-#endif
-    if (endFrame > m_lastModelEndFrame) {
-        m_lastModelEndFrame = endFrame;
-        rebuildRangeLists();
-    }
-}
-
-void
-AudioCallbackPlaySource::removeModel(Model *model)
-{
-    m_mutex.lock();
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl;
-#endif
-
-    disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)),
-               this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t)));
-
-    m_models.erase(model);
-
-    if (m_models.empty()) {
-	if (m_converter) {
-	    src_delete(m_converter);
-            src_delete(m_crapConverter);
-	    m_converter = 0;
-            m_crapConverter = 0;
-	}
-	m_sourceSampleRate = 0;
-    }
-
-    sv_frame_t lastEnd = 0;
-    for (std::set<Model *>::const_iterator i = m_models.begin();
-	 i != m_models.end(); ++i) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl;
-#endif
-	if ((*i)->getEndFrame() > lastEnd) {
-            lastEnd = (*i)->getEndFrame();
-        }
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	cout << "(done, lastEnd now " << lastEnd << ")" << endl;
-#endif
-    }
-    m_lastModelEndFrame = lastEnd;
-
-    m_audioGenerator->removeModel(model);
-
-    m_mutex.unlock();
-
-    clearRingBuffers();
-}
-
-void
-AudioCallbackPlaySource::clearModels()
-{
-    m_mutex.lock();
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::clearModels()" << endl;
-#endif
-
-    m_models.clear();
-
-    if (m_converter) {
-	src_delete(m_converter);
-        src_delete(m_crapConverter);
-	m_converter = 0;
-        m_crapConverter = 0;
-    }
-
-    m_lastModelEndFrame = 0;
-
-    m_sourceSampleRate = 0;
-
-    m_mutex.unlock();
-
-    m_audioGenerator->clearModels();
-
-    clearRingBuffers();
-}    
-
-void
-AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count)
-{
-    if (!haveLock) m_mutex.lock();
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << "clearRingBuffers" << endl;
-#endif
-
-    rebuildRangeLists();
-
-    if (count == 0) {
-	if (m_writeBuffers) count = int(m_writeBuffers->size());
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << "current playing frame = " << getCurrentPlayingFrame() << endl;
-
-    cerr << "write buffer fill (before) = " << m_writeBufferFill << endl;
-#endif
-    
-    m_writeBufferFill = getCurrentBufferedFrame();
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << "current buffered frame = " << m_writeBufferFill << endl;
-#endif
-
-    if (m_readBuffers != m_writeBuffers) {
-	delete m_writeBuffers;
-    }
-
-    m_writeBuffers = new RingBufferVector;
-
-    for (int i = 0; i < count; ++i) {
-	m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize));
-    }
-
-    m_audioGenerator->reset();
-    
-//    cout << "AudioCallbackPlaySource::clearRingBuffers: Created "
-//	      << count << " write buffers" << endl;
-
-    if (!haveLock) {
-	m_mutex.unlock();
-    }
-}
-
-void
-AudioCallbackPlaySource::play(sv_frame_t startFrame)
-{
-    if (!m_sourceSampleRate) {
-        cerr << "AudioCallbackPlaySource::play: No source sample rate available, not playing" << endl;
-        return;
-    }
-    
-    if (m_viewManager->getPlaySelectionMode() &&
-	!m_viewManager->getSelections().empty()) {
-
-        SVDEBUG << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = ";
-
-        startFrame = m_viewManager->constrainFrameToSelection(startFrame);
-
-        SVDEBUG << startFrame << endl;
-
-    } else {
-        if (startFrame < 0) {
-            startFrame = 0;
-        }
-	if (startFrame >= m_lastModelEndFrame) {
-	    startFrame = 0;
-	}
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << "play(" << startFrame << ") -> playback model ";
-#endif
-
-    startFrame = m_viewManager->alignReferenceToPlaybackFrame(startFrame);
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << startFrame << endl;
-#endif
-
-    // The fill thread will automatically empty its buffers before
-    // starting again if we have not so far been playing, but not if
-    // we're just re-seeking.
-    // NO -- we can end up playing some first -- always reset here
-
-    m_mutex.lock();
-
-    if (m_timeStretcher) {
-        m_timeStretcher->reset();
-    }
-    if (m_monoStretcher) {
-        m_monoStretcher->reset();
-    }
-
-    m_readBufferFill = m_writeBufferFill = startFrame;
-    if (m_readBuffers) {
-        for (int c = 0; c < getTargetChannelCount(); ++c) {
-            RingBuffer<float> *rb = getReadRingBuffer(c);
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            cerr << "reset ring buffer for channel " << c << endl;
-#endif
-            if (rb) rb->reset();
-        }
-    }
-    if (m_converter) src_reset(m_converter);
-    if (m_crapConverter) src_reset(m_crapConverter);
-
-    m_mutex.unlock();
-
-    m_audioGenerator->reset();
-
-    m_playStartFrame = startFrame;
-    m_playStartFramePassed = false;
-    m_playStartedAt = RealTime::zeroTime;
-    if (m_target) {
-        m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime());
-    }
-
-    bool changed = !m_playing;
-    m_lastRetrievalTimestamp = 0;
-    m_lastCurrentFrame = 0;
-    m_playing = true;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::play: awakening thread" << endl;
-#endif
-
-    m_condition.wakeAll();
-    if (changed) {
-        emit playStatusChanged(m_playing);
-        emit activity(tr("Play from %1").arg
-                      (RealTime::frame2RealTime
-                       (m_playStartFrame, m_sourceSampleRate).toText().c_str()));
-    }
-}
-
-void
-AudioCallbackPlaySource::stop()
-{
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::stop()" << endl;
-#endif
-    bool changed = m_playing;
-    m_playing = false;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::stop: awakening thread" << endl;
-#endif
-
-    m_condition.wakeAll();
-    m_lastRetrievalTimestamp = 0;
-    if (changed) {
-        emit playStatusChanged(m_playing);
-        emit activity(tr("Stop at %1").arg
-                      (RealTime::frame2RealTime
-                       (m_lastCurrentFrame, m_sourceSampleRate).toText().c_str()));
-    }
-    m_lastCurrentFrame = 0;
-}
-
-void
-AudioCallbackPlaySource::selectionChanged()
-{
-    if (m_viewManager->getPlaySelectionMode()) {
-	clearRingBuffers();
-    }
-}
-
-void
-AudioCallbackPlaySource::playLoopModeChanged()
-{
-    clearRingBuffers();
-}
-
-void
-AudioCallbackPlaySource::playSelectionModeChanged()
-{
-    if (!m_viewManager->getSelections().empty()) {
-	clearRingBuffers();
-    }
-}
-
-void
-AudioCallbackPlaySource::playParametersChanged(PlayParameters *)
-{
-    clearRingBuffers();
-}
-
-void
-AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n)
-{
-    if (n == "Resample Quality") {
-        setResampleQuality(Preferences::getInstance()->getResampleQuality());
-    }
-}
-
-void
-AudioCallbackPlaySource::audioProcessingOverload()
-{
-    cerr << "Audio processing overload!" << endl;
-
-    if (!m_playing) return;
-
-    RealTimePluginInstance *ap = m_auditioningPlugin;
-    if (ap && !m_auditioningPluginBypassed) {
-        m_auditioningPluginBypassed = true;
-        emit audioOverloadPluginDisabled();
-        return;
-    }
-
-    if (m_timeStretcher &&
-        m_timeStretcher->getTimeRatio() < 1.0 &&
-        m_stretcherInputCount > 1 &&
-        m_monoStretcher && !m_stretchMono) {
-        m_stretchMono = true;
-        emit audioTimeStretchMultiChannelDisabled();
-        return;
-    }
-}
-
-void
-AudioCallbackPlaySource::setTarget(AudioCallbackPlayTarget *target, int size)
-{
-    m_target = target;
-    cout << "AudioCallbackPlaySource::setTarget: Block size -> " << size << endl;
-    if (size != 0) {
-        m_blockSize = size;
-    }
-    if (size * 4 > m_ringBufferSize) {
-        SVDEBUG << "AudioCallbackPlaySource::setTarget: Buffer size "
-                  << size << " > a quarter of ring buffer size "
-                  << m_ringBufferSize << ", calling for more ring buffer"
-                  << endl;
-        m_ringBufferSize = size * 4;
-        if (m_writeBuffers && !m_writeBuffers->empty()) {
-            clearRingBuffers();
-        }
-    }
-}
-
-int
-AudioCallbackPlaySource::getTargetBlockSize() const
-{
-//    cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl;
-    return int(m_blockSize);
-}
-
-void
-AudioCallbackPlaySource::setTargetPlayLatency(sv_frame_t latency)
-{
-    m_playLatency = latency;
-}
-
-sv_frame_t
-AudioCallbackPlaySource::getTargetPlayLatency() const
-{
-    return m_playLatency;
-}
-
-sv_frame_t
-AudioCallbackPlaySource::getCurrentPlayingFrame()
-{
-    // This method attempts to estimate which audio sample frame is
-    // "currently coming through the speakers".
-
-    sv_samplerate_t targetRate = getTargetSampleRate();
-    sv_frame_t latency = m_playLatency; // at target rate
-    RealTime latency_t = RealTime::zeroTime;
-
-    if (targetRate != 0) {
-        latency_t = RealTime::frame2RealTime(latency, targetRate);
-    }
-
-    return getCurrentFrame(latency_t);
-}
-
-sv_frame_t
-AudioCallbackPlaySource::getCurrentBufferedFrame()
-{
-    return getCurrentFrame(RealTime::zeroTime);
-}
-
-sv_frame_t
-AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t)
-{
-    // We resample when filling the ring buffer, and time-stretch when
-    // draining it.  The buffer contains data at the "target rate" and
-    // the latency provided by the target is also at the target rate.
-    // Because of the multiple rates involved, we do the actual
-    // calculation using RealTime instead.
-
-    sv_samplerate_t sourceRate = getSourceSampleRate();
-    sv_samplerate_t targetRate = getTargetSampleRate();
-
-    if (sourceRate == 0 || targetRate == 0) return 0;
-
-    int inbuffer = 0; // at target rate
-
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-	RingBuffer<float> *rb = getReadRingBuffer(c);
-	if (rb) {
-	    int here = rb->getReadSpace();
-	    if (c == 0 || here < inbuffer) inbuffer = here;
-	}
-    }
-
-    sv_frame_t readBufferFill = m_readBufferFill;
-    sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize;
-    double lastRetrievalTimestamp = m_lastRetrievalTimestamp;
-    double currentTime = 0.0;
-    if (m_target) currentTime = m_target->getCurrentTime();
-
-    bool looping = m_viewManager->getPlayLoopMode();
-
-    RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate);
-
-    sv_frame_t stretchlat = 0;
-    double timeRatio = 1.0;
-
-    if (m_timeStretcher) {
-        stretchlat = m_timeStretcher->getLatency();
-        timeRatio = m_timeStretcher->getTimeRatio();
-    }
-
-    RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, targetRate);
-
-    // When the target has just requested a block from us, the last
-    // sample it obtained was our buffer fill frame count minus the
-    // amount of read space (converted back to source sample rate)
-    // remaining now.  That sample is not expected to be played until
-    // the target's play latency has elapsed.  By the time the
-    // following block is requested, that sample will be at the
-    // target's play latency minus the last requested block size away
-    // from being played.
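-
-    // Putting that together, the calculation below works out roughly as:
-    //
-    //   playing_t = bufferedto_t - latency_t - stretchlat_t
-    //               - lastretrieved_t - inbuffer_t + sincerequest_t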
-
-    RealTime sincerequest_t = RealTime::zeroTime;
-    RealTime lastretrieved_t = RealTime::zeroTime;
-
-    if (m_target &&
-        m_trustworthyTimestamps &&
-        lastRetrievalTimestamp != 0.0) {
-
-        lastretrieved_t = RealTime::frame2RealTime
-            (lastRetrievedBlockSize, targetRate);
-
-        // calculate number of frames at target rate that have elapsed
-        // since the end of the last call to getSourceSamples
-
-        if (m_trustworthyTimestamps && !looping) {
-
-            // this adjustment seems to cause more problems when looping
-            double elapsed = currentTime - lastRetrievalTimestamp;
-
-            if (elapsed > 0.0) {
-                sincerequest_t = RealTime::fromSeconds(elapsed);
-            }
-        }
-
-    } else {
-
-        lastretrieved_t = RealTime::frame2RealTime
-            (getTargetBlockSize(), targetRate);
-    }
-
-    RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, sourceRate);
-
-    if (timeRatio != 1.0) {
-        lastretrieved_t = lastretrieved_t / timeRatio;
-        sincerequest_t = sincerequest_t / timeRatio;
-        latency_t = latency_t / timeRatio;
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n  stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n  since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl;
-#endif
-
-    // Normally the range lists should contain at least one item each
-    // -- if playback is unconstrained, that item should report the
-    // entire source audio duration.
-
-    if (m_rangeStarts.empty()) {
-        rebuildRangeLists();
-    }
-
-    if (m_rangeStarts.empty()) {
-        // this code is only used in case of error in rebuildRangeLists
-        RealTime playing_t = bufferedto_t
-            - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
-            + sincerequest_t;
-        if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime;
-        sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
-        return m_viewManager->alignPlaybackFrameToReference(frame);
-    }
-
-    int inRange = 0;
-    int index = 0;
-
-    for (int i = 0; i < (int)m_rangeStarts.size(); ++i) {
-        if (bufferedto_t >= m_rangeStarts[i]) {
-            inRange = index;
-        } else {
-            break;
-        }
-        ++index;
-    }
-
-    if (inRange >= int(m_rangeStarts.size())) {
-        inRange = int(m_rangeStarts.size())-1;
-    }
-
-    RealTime playing_t = bufferedto_t;
-
-    playing_t = playing_t
-        - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
-        + sincerequest_t;
-
-    // This rather gross little hack is used to ensure that latency
-    // compensation doesn't result in the playback pointer appearing
-    // to start earlier than the actual playback does.  It doesn't
-    // work properly (hence the bail-out in the middle) because if we
-    // are playing a relatively short looped region, the playing time
-    // estimated from the buffer fill frame may have wrapped around
-    // the region boundary and end up being much smaller than the
-    // theoretical play start frame, perhaps even for the entire
-    // duration of playback!
-
-    if (!m_playStartFramePassed) {
-        RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame,
-                                                        sourceRate);
-        if (playing_t < playstart_t) {
-//            cerr << "playing_t " << playing_t << " < playstart_t " 
-//                      << playstart_t << endl;
-            if (/*!!! sincerequest_t > RealTime::zeroTime && */
-                m_playStartedAt + latency_t + stretchlat_t <
-                RealTime::fromSeconds(currentTime)) {
-//                cerr << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << endl;
-                m_playStartFramePassed = true;
-            } else {
-                playing_t = playstart_t;
-            }
-        } else {
-            m_playStartFramePassed = true;
-        }
-    }
- 
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    cerr << "playing_t " << playing_t;
-#endif
-
-    playing_t = playing_t - m_rangeStarts[inRange];
- 
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    cerr << " as offset into range " << inRange << " (start =" << m_rangeStarts[inRange] << " duration =" << m_rangeDurations[inRange] << ") = " << playing_t << endl;
-#endif
-
-    while (playing_t < RealTime::zeroTime) {
-
-        if (inRange == 0) {
-            if (looping) {
-                inRange = int(m_rangeStarts.size()) - 1;
-            } else {
-                break;
-            }
-        } else {
-            --inRange;
-        }
-
-        playing_t = playing_t + m_rangeDurations[inRange];
-    }
-
-    playing_t = playing_t + m_rangeStarts[inRange];
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    cerr << "  playing time: " << playing_t << endl;
-#endif
-
-    if (!looping) {
-        if (inRange == (int)m_rangeStarts.size()-1 &&
-            playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) {
-cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl;
-            stop();
-        }
-    }
-
-    if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime;
-
-    sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
-
-    if (m_lastCurrentFrame > 0 && !looping) {
-        if (frame < m_lastCurrentFrame) {
-            frame = m_lastCurrentFrame;
-        }
-    }
-
-    m_lastCurrentFrame = frame;
-
-    return m_viewManager->alignPlaybackFrameToReference(frame);
-}
-
-void
-AudioCallbackPlaySource::rebuildRangeLists()
-{
-    bool constrained = (m_viewManager->getPlaySelectionMode());
-
-    m_rangeStarts.clear();
-    m_rangeDurations.clear();
-
-    sv_samplerate_t sourceRate = getSourceSampleRate();
-    if (sourceRate == 0) return;
-
-    RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate);
-    if (end == RealTime::zeroTime) return;
-
-    if (!constrained) {
-        m_rangeStarts.push_back(RealTime::zeroTime);
-        m_rangeDurations.push_back(end);
-        return;
-    }
-
-    MultiSelection::SelectionList selections = m_viewManager->getSelections();
-    MultiSelection::SelectionList::const_iterator i;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::rebuildRangeLists" << endl;
-#endif
-
-    if (!selections.empty()) {
-
-        for (i = selections.begin(); i != selections.end(); ++i) {
-            
-            RealTime start =
-                (RealTime::frame2RealTime
-                 (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
-                  sourceRate));
-            RealTime duration = 
-                (RealTime::frame2RealTime
-                 (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) -
-                  m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
-                  sourceRate));
-            
-            m_rangeStarts.push_back(start);
-            m_rangeDurations.push_back(duration);
-        }
-    } else {
-        m_rangeStarts.push_back(RealTime::zeroTime);
-        m_rangeDurations.push_back(end);
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cerr << "Now have " << m_rangeStarts.size() << " play ranges" << endl;
-#endif
-}
-
-void
-AudioCallbackPlaySource::setOutputLevels(float left, float right)
-{
-    m_outputLeft = left;
-    m_outputRight = right;
-}
-
-bool
-AudioCallbackPlaySource::getOutputLevels(float &left, float &right)
-{
-    left = m_outputLeft;
-    right = m_outputRight;
-    return true;
-}
-
-void
-AudioCallbackPlaySource::setTargetSampleRate(sv_samplerate_t sr)
-{
-    bool first = (m_targetSampleRate == 0);
-
-    m_targetSampleRate = sr;
-    initialiseConverter();
-
-    if (first && (m_stretchRatio != 1.f)) {
-        // couldn't create a stretcher before because we had no sample
-        // rate: make one now
-        setTimeStretch(m_stretchRatio);
-    }
-}
-
-void
-AudioCallbackPlaySource::initialiseConverter()
-{
-    m_mutex.lock();
-
-    if (m_converter) {
-        src_delete(m_converter);
-        src_delete(m_crapConverter);
-        m_converter = 0;
-        m_crapConverter = 0;
-    }
-
-    if (getSourceSampleRate() != getTargetSampleRate()) {
-
-	int err = 0;
-
-	m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY :
-                              m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY :
-                              m_resampleQuality == 0 ? SRC_SINC_FASTEST :
-                                                       SRC_SINC_MEDIUM_QUALITY,
-			      getTargetChannelCount(), &err);
-
-        if (m_converter) {
-            m_crapConverter = src_new(SRC_LINEAR,
-                                      getTargetChannelCount(),
-                                      &err);
-        }
-
-	if (!m_converter || !m_crapConverter) {
-	    cerr
-		<< "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: "
-		<< src_strerror(err) << endl;
-
-            if (m_converter) {
-                src_delete(m_converter);
-                m_converter = 0;
-            } 
-
-            if (m_crapConverter) {
-                src_delete(m_crapConverter);
-                m_crapConverter = 0;
-            }
-
-            m_mutex.unlock();
-
-            emit sampleRateMismatch(getSourceSampleRate(),
-                                    getTargetSampleRate(),
-                                    false);
-	} else {
-
-            m_mutex.unlock();
-
-            emit sampleRateMismatch(getSourceSampleRate(),
-                                    getTargetSampleRate(),
-                                    true);
-        }
-    } else {
-        m_mutex.unlock();
-    }
-}
-
-void
-AudioCallbackPlaySource::setResampleQuality(int q)
-{
-    if (q == m_resampleQuality) return;
-    m_resampleQuality = q;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    SVDEBUG << "AudioCallbackPlaySource::setResampleQuality: setting to "
-              << m_resampleQuality << endl;
-#endif
-
-    initialiseConverter();
-}
-
-void
-AudioCallbackPlaySource::setAuditioningEffect(Auditionable *a)
-{
-    RealTimePluginInstance *plugin = dynamic_cast<RealTimePluginInstance *>(a);
-    if (a && !plugin) {
-        cerr << "WARNING: AudioCallbackPlaySource::setAuditioningEffect: auditionable object " << a << " is not a real-time plugin instance" << endl;
-    }
-
-    m_mutex.lock();
-    m_auditioningPlugin = plugin;
-    m_auditioningPluginBypassed = false;
-    m_mutex.unlock();
-}
-
-void
-AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
-{
-    m_audioGenerator->setSoloModelSet(s);
-    clearRingBuffers();
-}
-
-void
-AudioCallbackPlaySource::clearSoloModelSet()
-{
-    m_audioGenerator->clearSoloModelSet();
-    clearRingBuffers();
-}
-
-sv_samplerate_t
-AudioCallbackPlaySource::getTargetSampleRate() const
-{
-    if (m_targetSampleRate) return m_targetSampleRate;
-    else return getSourceSampleRate();
-}
-
-int
-AudioCallbackPlaySource::getSourceChannelCount() const
-{
-    return m_sourceChannelCount;
-}
-
-int
-AudioCallbackPlaySource::getTargetChannelCount() const
-{
-    if (m_sourceChannelCount < 2) return 2;
-    return m_sourceChannelCount;
-}
-
-sv_samplerate_t
-AudioCallbackPlaySource::getSourceSampleRate() const
-{
-    return m_sourceSampleRate;
-}
-
-void
-AudioCallbackPlaySource::setTimeStretch(double factor)
-{
-    m_stretchRatio = factor;
-
-    if (!getTargetSampleRate()) return; // have to make our stretcher later
-
-    if (m_timeStretcher || (factor == 1.0)) {
-        // stretch ratio will be set in next process call if appropriate
-    } else {
-        m_stretcherInputCount = getTargetChannelCount();
-        RubberBandStretcher *stretcher = new RubberBandStretcher
-            (int(getTargetSampleRate()),
-             m_stretcherInputCount,
-             RubberBandStretcher::OptionProcessRealTime,
-             factor);
-        RubberBandStretcher *monoStretcher = new RubberBandStretcher
-            (int(getTargetSampleRate()),
-             1,
-             RubberBandStretcher::OptionProcessRealTime,
-             factor);
-        m_stretcherInputs = new float *[m_stretcherInputCount];
-        m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount];
-        for (int c = 0; c < m_stretcherInputCount; ++c) {
-            m_stretcherInputSizes[c] = 16384;
-            m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
-        }
-        m_monoStretcher = monoStretcher;
-        m_timeStretcher = stretcher;
-    }
-
-    emit activity(tr("Change time-stretch factor to %1").arg(factor));
-}
-
-sv_frame_t
-AudioCallbackPlaySource::getSourceSamples(sv_frame_t count, float **buffer)
-{
-    if (!m_playing) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-        SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl;
-#endif
-	for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
-	    for (int i = 0; i < count; ++i) {
-		buffer[ch][i] = 0.0;
-	    }
-	}
-	return 0;
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Playing" << endl;
-#endif
-
-    // Ensure that all buffers have at least the amount of data we
-    // need -- else reduce the size of our requests correspondingly
-
-    for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
-
-        RingBuffer<float> *rb = getReadRingBuffer(ch);
-        
-        if (!rb) {
-            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
-                      << "No ring buffer available for channel " << ch
-                      << ", returning no data here" << endl;
-            count = 0;
-            break;
-        }
-
-        int rs = rb->getReadSpace();
-        if (rs < count) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
-                      << "Ring buffer for channel " << ch << " has only "
-                      << rs << " (of " << count << ") samples available ("
-                      << "ring buffer size is " << rb->getSize() << ", write "
-                      << "space " << rb->getWriteSpace() << "), "
-                      << "reducing request size" << endl;
-#endif
-            count = rs;
-        }
-    }
-
-    if (count == 0) return 0;
-
-    RubberBandStretcher *ts = m_timeStretcher;
-    RubberBandStretcher *ms = m_monoStretcher;
-
-    double ratio = ts ? ts->getTimeRatio() : 1.0;
-
-    if (ratio != m_stretchRatio) {
-        if (!ts) {
-            cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl;
-            m_stretchRatio = 1.0;
-        } else {
-            ts->setTimeRatio(m_stretchRatio);
-            if (ms) ms->setTimeRatio(m_stretchRatio);
-            if (m_stretchRatio >= 1.0) m_stretchMono = false;
-        }
-    }
-
-    int stretchChannels = m_stretcherInputCount;
-    if (m_stretchMono) {
-        if (ms) {
-            ts = ms;
-            stretchChannels = 1;
-        } else {
-            m_stretchMono = false;
-        }
-    }
-
-    if (m_target) {
-        m_lastRetrievedBlockSize = count;
-        m_lastRetrievalTimestamp = m_target->getCurrentTime();
-    }
-
-    if (!ts || ratio == 1.f) {
-
-	int got = 0;
-
-	for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
-
-	    RingBuffer<float> *rb = getReadRingBuffer(ch);
-
-	    if (rb) {
-
-		// this is marginally more likely to leave our channels in
-		// sync after a processing failure than just passing "count":
-		sv_frame_t request = count;
-		if (ch > 0) request = got;
-
-		got = rb->read(buffer[ch], int(request));
-	    
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-		cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl;
-#endif
-	    }
-
-	    for (int ch = 0; ch < getTargetChannelCount(); ++ch) {
-		for (int i = got; i < count; ++i) {
-		    buffer[ch][i] = 0.0;
-		}
-	    }
-	}
-
-        applyAuditioningEffect(count, buffer);
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::getSamples: awakening thread" << endl;
-#endif
-
-        m_condition.wakeAll();
-
-	return got;
-    }
-
-    int channels = getTargetChannelCount();
-    sv_frame_t available;
-    sv_frame_t fedToStretcher = 0;
-    int warned = 0;
-
-    // The input block for a given output is approx output / ratio,
-    // but we can't predict it exactly, for an adaptive timestretcher.
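-    // (For example, at a time ratio of 2.0 -- i.e. playing at half speed --
-    // roughly count/2 input frames are needed to produce count output frames.)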
-
-    while ((available = ts->available()) < count) {
-
-        sv_frame_t reqd = lrint(double(count - available) / ratio);
-        reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired()));
-        if (reqd == 0) reqd = 1;
-                
-        sv_frame_t got = reqd;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-        cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl;
-#endif
-
-        for (int c = 0; c < channels; ++c) {
-            if (c >= m_stretcherInputCount) continue;
-            if (reqd > m_stretcherInputSizes[c]) {
-                if (c == 0) {
-                    cerr << "WARNING: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << endl;
-                }
-                delete[] m_stretcherInputs[c];
-                m_stretcherInputSizes[c] = reqd * 2;
-                m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
-            }
-        }
-
-        for (int c = 0; c < channels; ++c) {
-            if (c >= m_stretcherInputCount) continue;
-            RingBuffer<float> *rb = getReadRingBuffer(c);
-            if (rb) {
-                sv_frame_t gotHere;
-                if (stretchChannels == 1 && c > 0) {
-                    gotHere = rb->readAdding(m_stretcherInputs[0], int(got));
-                } else {
-                    gotHere = rb->read(m_stretcherInputs[c], int(got));
-                }
-                if (gotHere < got) got = gotHere;
-                
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-                if (c == 0) {
-                    SVDEBUG << "feeding stretcher: got " << gotHere
-                              << ", " << rb->getReadSpace() << " remain" << endl;
-                }
-#endif
-                
-            } else {
-                cerr << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << endl;
-            }
-        }
-
-        if (got < reqd) {
-            cerr << "WARNING: Read underrun in playback ("
-                      << got << " < " << reqd << ")" << endl;
-        }
-
-        ts->process(m_stretcherInputs, size_t(got), false);
-
-        fedToStretcher += got;
-
-        if (got == 0) break;
-
-        if (ts->available() == available) {
-            cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << endl;
-            if (++warned == 5) break;
-        }
-    }
-
-    ts->retrieve(buffer, size_t(count));
-
-    for (int c = stretchChannels; c < getTargetChannelCount(); ++c) {
-        for (int i = 0; i < count; ++i) {
-            buffer[c][i] = buffer[0][i];
-        }
-    }
-
-    applyAuditioningEffect(count, buffer);
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySource::getSamples [stretched]: awakening thread" << endl;
-#endif
-
-    m_condition.wakeAll();
-
-    return count;
-}
-
-void
-AudioCallbackPlaySource::applyAuditioningEffect(sv_frame_t count, float **buffers)
-{
-    if (m_auditioningPluginBypassed) return;
-    RealTimePluginInstance *plugin = m_auditioningPlugin;
-    if (!plugin) return;
-    
-    if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) {
-//        cerr << "plugin input count " << plugin->getAudioInputCount() 
-//                  << " != our channel count " << getTargetChannelCount()
-//                  << endl;
-        return;
-    }
-    if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) {
-//        cerr << "plugin output count " << plugin->getAudioOutputCount() 
-//                  << " != our channel count " << getTargetChannelCount()
-//                  << endl;
-        return;
-    }
-    if ((int)plugin->getBufferSize() < count) {
-//        cerr << "plugin buffer size " << plugin->getBufferSize() 
-//                  << " < our block size " << count
-//                  << endl;
-        return;
-    }
-
-    float **ib = plugin->getAudioInputBuffers();
-    float **ob = plugin->getAudioOutputBuffers();
-
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-        for (int i = 0; i < count; ++i) {
-            ib[c][i] = buffers[c][i];
-        }
-    }
-
-    plugin->run(Vamp::RealTime::zeroTime, int(count));
-    
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-        for (int i = 0; i < count; ++i) {
-            buffers[c][i] = ob[c][i];
-        }
-    }
-}    
-
-// Called from fill thread, m_playing true, mutex held
-bool
-AudioCallbackPlaySource::fillBuffers()
-{
-    static float *tmp = 0;
-    static sv_frame_t tmpSize = 0;
-
-    sv_frame_t space = 0;
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-	RingBuffer<float> *wb = getWriteRingBuffer(c);
-	if (wb) {
-	    sv_frame_t spaceHere = wb->getWriteSpace();
-	    if (c == 0 || spaceHere < space) space = spaceHere;
-	}
-    }
-    
-    if (space == 0) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-        cout << "AudioCallbackPlaySourceFillThread: no space to fill" << endl;
-#endif
-        return false;
-    }
-
-    sv_frame_t f = m_writeBufferFill;
-	
-    bool readWriteEqual = (m_readBuffers == m_writeBuffers);
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    if (!readWriteEqual) {
-        cout << "AudioCallbackPlaySourceFillThread: note read buffers != write buffers" << endl;
-    }
-    cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << endl;
-#endif
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "buffered to " << f << " already" << endl;
-#endif
-
-    bool resample = (getSourceSampleRate() != getTargetSampleRate());
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << endl;
-#endif
-
-    int channels = getTargetChannelCount();
-
-    sv_frame_t orig = space;
-    sv_frame_t got = 0;
-
-    static float **bufferPtrs = 0;
-    static int bufferPtrCount = 0;
-
-    if (bufferPtrCount < channels) {
-	if (bufferPtrs) delete[] bufferPtrs;
-	bufferPtrs = new float *[channels];
-	bufferPtrCount = channels;
-    }
-
-    sv_frame_t generatorBlockSize = m_audioGenerator->getBlockSize();
-
-    if (resample && !m_converter) {
-	static bool warned = false;
-	if (!warned) {
-	    cerr << "WARNING: sample rates differ, but no converter available!" << endl;
-	    warned = true;
-	}
-    }
-
-    if (resample && m_converter) {
-
-	double ratio =
-	    double(getTargetSampleRate()) / double(getSourceSampleRate());
-	orig = sv_frame_t(double(orig) / ratio + 0.1);
-
-	// orig must be a multiple of generatorBlockSize
-	orig = (orig / generatorBlockSize) * generatorBlockSize;
-	if (orig == 0) return false;
-
-	sv_frame_t work = std::max(orig, space);
-
-	// We only allocate one buffer, but we use it in two halves.
-	// We place the non-interleaved values in the second half of
-	// the buffer (orig samples for channel 0, orig samples for
-	// channel 1 etc), and then interleave them into the first
-	// half of the buffer.  Then we resample back into the second
-	// half (interleaved) and de-interleave the results back to
-	// the start of the buffer for insertion into the ringbuffers.
-	// What a faff -- especially as we've already de-interleaved
-	// the audio data from the source file elsewhere before we
-	// even reach this point.
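-	//
-	// Concretely, the layout of tmp (channels * work * 2 floats) is:
-	//   first half  (intlv): interleaved input to the resampler, later
-	//               reused for the de-interleaved copy-out to the ring buffers
-	//   second half (nonintlv / srcout): the non-interleaved mix, later
-	//               overwritten by the interleaved resampler output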
-	
-	if (tmpSize < channels * work * 2) {
-	    delete[] tmp;
-	    tmp = new float[channels * work * 2];
-	    tmpSize = channels * work * 2;
-	}
-
-	float *nonintlv = tmp + channels * work;
-	float *intlv = tmp;
-	float *srcout = tmp + channels * work;
-	
-	for (int c = 0; c < channels; ++c) {
-	    for (int i = 0; i < orig; ++i) {
-		nonintlv[channels * i + c] = 0.0f;
-	    }
-	}
-
-	for (int c = 0; c < channels; ++c) {
-	    bufferPtrs[c] = nonintlv + c * orig;
-	}
-
-	got = mixModels(f, orig, bufferPtrs); // also modifies f
-
-	// and interleave into first half
-	for (int c = 0; c < channels; ++c) {
-	    for (int i = 0; i < got; ++i) {
-		float sample = nonintlv[c * got + i];
-		intlv[channels * i + c] = sample;
-	    }
-	}
-		
-	SRC_DATA data;
-	data.data_in = intlv;
-	data.data_out = srcout;
-	data.input_frames = long(got);
-	data.output_frames = long(work);
-	data.src_ratio = ratio;
-	data.end_of_input = 0;
-	
-	int err = 0;
-
-        if (m_timeStretcher && m_timeStretcher->getTimeRatio() < 0.4) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            cout << "Using crappy converter" << endl;
-#endif
-            err = src_process(m_crapConverter, &data);
-        } else {
-            err = src_process(m_converter, &data);
-        }
-
-	sv_frame_t toCopy = sv_frame_t(double(got) * ratio + 0.1);
-
-	if (err) {
-	    cerr
-		<< "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
-		<< src_strerror(err) << endl;
-	    //!!! Then what?
-	} else {
-	    got = data.input_frames_used;
-	    toCopy = data.output_frames_gen;
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    cout << "Resampled " << got << " frames to " << toCopy << " frames" << endl;
-#endif
-	}
-	
-	for (int c = 0; c < channels; ++c) {
-	    for (int i = 0; i < toCopy; ++i) {
-		tmp[i] = srcout[channels * i + c];
-	    }
-	    RingBuffer<float> *wb = getWriteRingBuffer(c);
-	    if (wb) wb->write(tmp, int(toCopy));
-	}
-
-	m_writeBufferFill = f;
-	if (readWriteEqual) m_readBufferFill = f;
-
-    } else {
-
-	// space must be a multiple of generatorBlockSize
-        sv_frame_t reqSpace = space;
-	space = (reqSpace / generatorBlockSize) * generatorBlockSize;
-	if (space == 0) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            cout << "requested fill of " << reqSpace
-                      << " is less than generator block size of "
-                      << generatorBlockSize << ", leaving it" << endl;
-#endif
-            return false;
-        }
-
-	if (tmpSize < channels * space) {
-	    delete[] tmp;
-	    tmp = new float[channels * space];
-	    tmpSize = channels * space;
-	}
-
-	for (int c = 0; c < channels; ++c) {
-
-	    bufferPtrs[c] = tmp + c * space;
-	    
-	    for (int i = 0; i < space; ++i) {
-		tmp[c * space + i] = 0.0f;
-	    }
-	}
-
-	sv_frame_t got = mixModels(f, space, bufferPtrs); // also modifies f
-
-	for (int c = 0; c < channels; ++c) {
-
-	    RingBuffer<float> *wb = getWriteRingBuffer(c);
-	    if (wb) {
-                int actual = wb->write(bufferPtrs[c], int(got));
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-		cout << "Wrote " << actual << " samples for ch " << c << ", now "
-			  << wb->getReadSpace() << " to read" 
-			  << endl;
-#endif
-                if (actual < got) {
-                    cerr << "WARNING: Buffer overrun in channel " << c
-                              << ": wrote " << actual << " of " << got
-                              << " samples" << endl;
-                }
-            }
-	}
-
-	m_writeBufferFill = f;
-	if (readWriteEqual) m_readBufferFill = f;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-        cout << "Read buffer fill is now " << m_readBufferFill << endl;
-#endif
-
-	//!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
-    }
-
-    return true;
-}    
-
-sv_frame_t
-AudioCallbackPlaySource::mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers)
-{
-    sv_frame_t processed = 0;
-    sv_frame_t chunkStart = frame;
-    sv_frame_t chunkSize = count;
-    sv_frame_t selectionSize = 0;
-    sv_frame_t nextChunkStart = chunkStart + chunkSize;
-    
-    bool looping = m_viewManager->getPlayLoopMode();
-    bool constrained = (m_viewManager->getPlaySelectionMode() &&
-			!m_viewManager->getSelections().empty());
-
-    static float **chunkBufferPtrs = 0;
-    static int chunkBufferPtrCount = 0;
-    int channels = getTargetChannelCount();
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << endl;
-#endif
-
-    if (chunkBufferPtrCount < channels) {
-	if (chunkBufferPtrs) delete[] chunkBufferPtrs;
-	chunkBufferPtrs = new float *[channels];
-	chunkBufferPtrCount = channels;
-    }
-
-    for (int c = 0; c < channels; ++c) {
-	chunkBufferPtrs[c] = buffers[c];
-    }
-
-    while (processed < count) {
-	
-	chunkSize = count - processed;
-	nextChunkStart = chunkStart + chunkSize;
-	selectionSize = 0;
-
-	sv_frame_t fadeIn = 0, fadeOut = 0;
-
-	if (constrained) {
-
-            sv_frame_t rChunkStart =
-                m_viewManager->alignPlaybackFrameToReference(chunkStart);
-	    
-	    Selection selection =
-		m_viewManager->getContainingSelection(rChunkStart, true);
-	    
-	    if (selection.isEmpty()) {
-		if (looping) {
-		    selection = *m_viewManager->getSelections().begin();
-		    chunkStart = m_viewManager->alignReferenceToPlaybackFrame
-                        (selection.getStartFrame());
-		    fadeIn = 50;
-		}
-	    }
-
-	    if (selection.isEmpty()) {
-
-		chunkSize = 0;
-		nextChunkStart = chunkStart;
-
-	    } else {
-
-                sv_frame_t sf = m_viewManager->alignReferenceToPlaybackFrame
-                    (selection.getStartFrame());
-                sv_frame_t ef = m_viewManager->alignReferenceToPlaybackFrame
-                    (selection.getEndFrame());
-
-		selectionSize = ef - sf;
-
-		if (chunkStart < sf) {
-		    chunkStart = sf;
-		    fadeIn = 50;
-		}
-
-		nextChunkStart = chunkStart + chunkSize;
-
-		if (nextChunkStart >= ef) {
-		    nextChunkStart = ef;
-		    fadeOut = 50;
-		}
-
-		chunkSize = nextChunkStart - chunkStart;
-	    }
-	
-	} else if (looping && m_lastModelEndFrame > 0) {
-
-	    if (chunkStart >= m_lastModelEndFrame) {
-		chunkStart = 0;
-	    }
-	    if (chunkSize > m_lastModelEndFrame - chunkStart) {
-		chunkSize = m_lastModelEndFrame - chunkStart;
-	    }
-	    nextChunkStart = chunkStart + chunkSize;
-	}
-	
-//	cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << endl;
-
-	if (!chunkSize) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    cout << "Ending selection playback at " << nextChunkStart << endl;
-#endif
-	    // We need to maintain full buffers so that the other
-	    // thread can tell where it's got to in the playback -- so
-	    // return the full amount here
-	    frame = frame + count;
-	    return count;
-	}
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl;
-#endif
-
-	if (selectionSize < 100) {
-	    fadeIn = 0;
-	    fadeOut = 0;
-	} else if (selectionSize < 300) {
-	    if (fadeIn > 0) fadeIn = 10;
-	    if (fadeOut > 0) fadeOut = 10;
-	}
-
-	if (fadeIn > 0) {
-	    if (processed * 2 < fadeIn) {
-		fadeIn = processed * 2;
-	    }
-	}
-
-	if (fadeOut > 0) {
-	    if ((count - processed - chunkSize) * 2 < fadeOut) {
-		fadeOut = (count - processed - chunkSize) * 2;
-	    }
-	}
-
-	for (std::set<Model *>::iterator mi = m_models.begin();
-	     mi != m_models.end(); ++mi) {
-	    
-	    (void) m_audioGenerator->mixModel(*mi, chunkStart, 
-                                              chunkSize, chunkBufferPtrs,
-                                              fadeIn, fadeOut);
-	}
-
-	for (int c = 0; c < channels; ++c) {
-	    chunkBufferPtrs[c] += chunkSize;
-	}
-
-	processed += chunkSize;
-	chunkStart = nextChunkStart;
-    }
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << endl;
-#endif
-
-    frame = nextChunkStart;
-    return processed;
-}
-
-void
-AudioCallbackPlaySource::unifyRingBuffers()
-{
-    if (m_readBuffers == m_writeBuffers) return;
-
-    // only unify if there will be something to read
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-	RingBuffer<float> *wb = getWriteRingBuffer(c);
-	if (wb) {
-	    if (wb->getReadSpace() < m_blockSize * 2) {
-		if ((m_writeBufferFill + m_blockSize * 2) < 
-		    m_lastModelEndFrame) {
-		    // OK, we don't have enough and there's more to
-		    // read -- don't unify until we can do better
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-                    SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: Not unifying: write buffer has less (" << wb->getReadSpace() << ") than " << m_blockSize*2 << " to read and write buffer fill (" << m_writeBufferFill << ") is not close to end frame (" << m_lastModelEndFrame << ")" << endl;
-#endif
-		    return;
-		}
-	    }
-	    break;
-	}
-    }
-
-    sv_frame_t rf = m_readBufferFill;
-    RingBuffer<float> *rb = getReadRingBuffer(0);
-    if (rb) {
-	int rs = rb->getReadSpace();
-	//!!! incorrect when in non-contiguous selection, see comments elsewhere
-//	cout << "rs = " << rs << endl;
-	if (rs < rf) rf -= rs;
-	else rf = 0;
-    }
-    
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl;
-#endif
-
-    sv_frame_t wf = m_writeBufferFill;
-    sv_frame_t skip = 0;
-    for (int c = 0; c < getTargetChannelCount(); ++c) {
-	RingBuffer<float> *wb = getWriteRingBuffer(c);
-	if (wb) {
-	    if (c == 0) {
-		
-		int wrs = wb->getReadSpace();
-//		cout << "wrs = " << wrs << endl;
-
-		if (wrs < wf) wf -= wrs;
-		else wf = 0;
-//		cout << "wf = " << wf << endl;
-		
-		if (wf < rf) skip = rf - wf;
-		if (skip == 0) break;
-	    }
-
-//	    cout << "skipping " << skip << endl;
-	    wb->skip(int(skip));
-	}
-    }
-		    
-    m_bufferScavenger.claim(m_readBuffers);
-    m_readBuffers = m_writeBuffers;
-    m_readBufferFill = m_writeBufferFill;
-#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
-    cerr << "unified" << endl;
-#endif
-}
-
-void
-AudioCallbackPlaySource::FillThread::run()
-{
-    AudioCallbackPlaySource &s(m_source);
-    
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "AudioCallbackPlaySourceFillThread starting" << endl;
-#endif
-
-    s.m_mutex.lock();
-
-    bool previouslyPlaying = s.m_playing;
-    bool work = false;
-
-    while (!s.m_exiting) {
-
-	s.unifyRingBuffers();
-	s.m_bufferScavenger.scavenge();
-        s.m_pluginScavenger.scavenge();
-
-	if (work && s.m_playing && s.getSourceSampleRate()) {
-	    
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    cout << "AudioCallbackPlaySourceFillThread: not waiting" << endl;
-#endif
-
-	    s.m_mutex.unlock();
-	    s.m_mutex.lock();
-
-	} else {
-	    
-	    double ms = 100;
-	    if (s.getSourceSampleRate() > 0) {
-		ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0;
-	    }
-	    
-	    if (s.m_playing) ms /= 10;
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            if (!s.m_playing) cout << endl;
-	    cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << endl;
-#endif
-	    
-	    s.m_condition.wait(&s.m_mutex, int(ms));
-	}
-
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	cout << "AudioCallbackPlaySourceFillThread: awoken" << endl;
-#endif
-
-	work = false;
-
-	if (!s.getSourceSampleRate()) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-            cout << "AudioCallbackPlaySourceFillThread: source sample rate is zero" << endl;
-#endif
-            continue;
-        }
-
-	bool playing = s.m_playing;
-
-	if (playing && !previouslyPlaying) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl;
-#endif
-	    for (int c = 0; c < s.getTargetChannelCount(); ++c) {
-		RingBuffer<float> *rb = s.getReadRingBuffer(c);
-		if (rb) rb->reset();
-	    }
-	}
-	previouslyPlaying = playing;
-
-	work = s.fillBuffers();
-    }
-
-    s.m_mutex.unlock();
-}
-
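The mixModels() implementation removed above splits each fill request into chunks that never cross a selection boundary, requesting short fades wherever a chunk enters or leaves a selection. A rough standalone sketch of that splitting step (illustrative names only; it ignores looping and reference-frame alignment) might look like this:

// Illustrative sketch only (not part of the changeset): split a fill
// request into chunks that stay inside one selection, marking where a
// short fade-in/out would be applied.  Ignores looping and alignment.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Selection { int64_t start, end; };                // [start, end)
struct Chunk { int64_t start, size; bool fadeIn, fadeOut; };

std::vector<Chunk> planChunks(int64_t frame, int64_t count,
                              const std::vector<Selection> &selections)
{
    std::vector<Chunk> chunks;
    int64_t processed = 0;
    while (processed < count) {
        int64_t pos = frame + processed;
        const Selection *current = nullptr;
        for (const auto &s : selections) {
            if (pos >= s.start && pos < s.end) { current = &s; break; }
        }
        if (!current) break;                             // outside all selections
        int64_t size = std::min(count - processed, current->end - pos);
        chunks.push_back({ pos, size,
                           pos == current->start,        // fading in at selection start
                           pos + size == current->end });// fading out at selection end
        processed += size;
    }
    return chunks;
}
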
--- a/audioio/AudioCallbackPlaySource.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,384 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam and QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_CALLBACK_PLAY_SOURCE_H_
-#define _AUDIO_CALLBACK_PLAY_SOURCE_H_
-
-#include "base/RingBuffer.h"
-#include "base/AudioPlaySource.h"
-#include "base/PropertyContainer.h"
-#include "base/Scavenger.h"
-
-#include <QObject>
-#include <QMutex>
-#include <QWaitCondition>
-
-#include "base/Thread.h"
-#include "base/RealTime.h"
-
-#include <samplerate.h>
-
-#include <set>
-#include <map>
-
-namespace RubberBand {
-    class RubberBandStretcher;
-}
-
-class Model;
-class ViewManagerBase;
-class AudioGenerator;
-class PlayParameters;
-class RealTimePluginInstance;
-class AudioCallbackPlayTarget;
-
-/**
- * AudioCallbackPlaySource manages audio data supply to callback-based
- * audio APIs such as JACK or CoreAudio.  It maintains one ring buffer
- * per channel, filled during playback by a non-realtime thread, and
- * provides a method for a realtime thread to pick up the latest
- * available sample data from these buffers.
- */
-class AudioCallbackPlaySource : public QObject,
-				public AudioPlaySource
-{
-    Q_OBJECT
-
-public:
-    AudioCallbackPlaySource(ViewManagerBase *, QString clientName);
-    virtual ~AudioCallbackPlaySource();
-    
-    /**
-     * Add a data model to be played from.  The source can mix
-     * playback from a number of sources including dense and sparse
-     * models.  The models must match in sample rate, but they don't
-     * have to have identical numbers of channels.
-     */
-    virtual void addModel(Model *model);
-
-    /**
-     * Remove a model.
-     */
-    virtual void removeModel(Model *model);
-
-    /**
-     * Remove all models.  (Silence will ensue.)
-     */
-    virtual void clearModels();
-
-    /**
-     * Start making data available in the ring buffers for playback,
-     * from the given frame.  If playback is already under way, reseek
-     * to the given frame and continue.
-     */
-    virtual void play(sv_frame_t startFrame);
-
-    /**
-     * Stop playback and ensure that no more data is returned.
-     */
-    virtual void stop();
-
-    /**
-     * Return whether playback is currently supposed to be happening.
-     */
-    virtual bool isPlaying() const { return m_playing; }
-
-    /**
-     * Return the frame number that is currently expected to be coming
-     * out of the speakers (i.e. compensating for playback latency).
-     */
-    virtual sv_frame_t getCurrentPlayingFrame();
-    
-    /** 
-     * Return the last frame that would come out of the speakers if we
-     * stopped playback right now.
-     */
-    virtual sv_frame_t getCurrentBufferedFrame();
-
-    /**
-     * Return the frame at which playback is expected to end (if not looping).
-     */
-    virtual sv_frame_t getPlayEndFrame() { return m_lastModelEndFrame; }
-
-    /**
-     * Set the target and the block size of the target audio device.
-     * This should be called by the target class.
-     */
-    void setTarget(AudioCallbackPlayTarget *, int blockSize);
-
-    /**
-     * Get the block size of the target audio device.  This may be an
-     * estimate or upper bound, if the target has a variable block
-     * size; the source should behave itself even if this value turns
-     * out to be inaccurate.
-     */
-    int getTargetBlockSize() const;
-
-    /**
-     * Set the playback latency of the target audio device, in frames
-     * at the target sample rate.  This is the difference between the
-     * frame currently "leaving the speakers" and the last frame (or
-     * highest last frame across all channels) requested via
-     * getSamples().  The default is zero.
-     */
-    void setTargetPlayLatency(sv_frame_t);
-
-    /**
-     * Get the playback latency of the target audio device.
-     */
-    sv_frame_t getTargetPlayLatency() const;
-
-    /**
-     * Specify that the target audio device has a fixed sample rate
-     * (i.e. cannot accommodate arbitrary sample rates based on the
-     * source).  If the target sets this to something other than the
-     * source sample rate, this class will resample automatically to
-     * fit.
-     */
-    void setTargetSampleRate(sv_samplerate_t);
-
-    /**
-     * Return the sample rate set by the target audio device (or the
-     * source sample rate if the target hasn't set one).
-     */
-    virtual sv_samplerate_t getTargetSampleRate() const;
-
-    /**
-     * Set the current output levels for metering (for call from the
-     * target)
-     */
-    void setOutputLevels(float left, float right);
-
-    /**
-     * Return the current (or thereabouts) output levels in the range
-     * 0.0 -> 1.0, for metering purposes.
-     */
-    virtual bool getOutputLevels(float &left, float &right);
-
-    /**
-     * Get the number of channels of audio in the source models.
-     * This may safely be called from a realtime thread.  Returns 0 if
-     * there is no source yet available.
-     */
-    int getSourceChannelCount() const;
-
-    /**
-     * Get the number of channels of audio that will be provided
-     * to the play target.  This may be more than the source channel
-     * count: for example, a mono source will provide 2 channels
-     * after pan.
-     * This may safely be called from a realtime thread.  Returns 0 if
-     * there is no source yet available.
-     */
-    int getTargetChannelCount() const;
-
-    /**
-     * Get the actual sample rate of the source material.  This may
-     * safely be called from a realtime thread.  Returns 0 if there is
-     * no source yet available.
-     */
-    virtual sv_samplerate_t getSourceSampleRate() const;
-
-    /**
-     * Get "count" samples (at the target sample rate) of the mixed
-     * audio data, in all channels.  This may safely be called from a
-     * realtime thread.
-     */
-    sv_frame_t getSourceSamples(sv_frame_t count, float **buffer);
-
-    /**
-     * Set the time stretcher factor (i.e. playback speed).
-     */
-    void setTimeStretch(double factor);
-
-    /**
-     * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is
-     * highest quality.
-     */
-    void setResampleQuality(int q);
-
-    /**
-     * Set a single real-time plugin as a processing effect for
-     * auditioning during playback.
-     *
-     * The plugin must have been initialised with
-     * getTargetChannelCount() channels and a getTargetBlockSize()
-     * sample frame processing block size.
-     *
-     * This playback source takes ownership of the plugin, which will
-     * be deleted at some point after the next call to
-     * setAuditioningEffect (depending on real-time constraints).
-     *
-     * Pass a null pointer to remove the current auditioning plugin,
-     * if any.
-     */
-    void setAuditioningEffect(Auditionable *plugin);
-
-    /**
-     * Specify that only the given set of models should be played.
-     */
-    void setSoloModelSet(std::set<Model *>s);
-
-    /**
-     * Specify that all models should be played as normal (if not
-     * muted).
-     */
-    void clearSoloModelSet();
-
-    QString getClientName() const { return m_clientName; }
-
-signals:
-    void modelReplaced();
-
-    void playStatusChanged(bool isPlaying);
-
-    void sampleRateMismatch(sv_samplerate_t requested,
-                            sv_samplerate_t available,
-                            bool willResample);
-
-    void audioOverloadPluginDisabled();
-    void audioTimeStretchMultiChannelDisabled();
-
-    void activity(QString);
-
-public slots:
-    void audioProcessingOverload();
-
-protected slots:
-    void selectionChanged();
-    void playLoopModeChanged();
-    void playSelectionModeChanged();
-    void playParametersChanged(PlayParameters *);
-    void preferenceChanged(PropertyContainer::PropertyName);
-    void modelChangedWithin(sv_frame_t startFrame, sv_frame_t endFrame);
-
-protected:
-    ViewManagerBase                  *m_viewManager;
-    AudioGenerator                   *m_audioGenerator;
-    QString                           m_clientName;
-
-    class RingBufferVector : public std::vector<RingBuffer<float> *> {
-    public:
-	virtual ~RingBufferVector() {
-	    while (!empty()) {
-		delete *begin();
-		erase(begin());
-	    }
-	}
-    };
-
-    std::set<Model *>                 m_models;
-    RingBufferVector                 *m_readBuffers;
-    RingBufferVector                 *m_writeBuffers;
-    sv_frame_t                        m_readBufferFill;
-    sv_frame_t                        m_writeBufferFill;
-    Scavenger<RingBufferVector>       m_bufferScavenger;
-    int                               m_sourceChannelCount;
-    sv_frame_t                        m_blockSize;
-    sv_samplerate_t                   m_sourceSampleRate;
-    sv_samplerate_t                   m_targetSampleRate;
-    sv_frame_t                        m_playLatency;
-    AudioCallbackPlayTarget          *m_target;
-    double                            m_lastRetrievalTimestamp;
-    sv_frame_t                        m_lastRetrievedBlockSize;
-    bool                              m_trustworthyTimestamps;
-    sv_frame_t                        m_lastCurrentFrame;
-    bool                              m_playing;
-    bool                              m_exiting;
-    sv_frame_t                        m_lastModelEndFrame;
-    int                               m_ringBufferSize;
-    float                             m_outputLeft;
-    float                             m_outputRight;
-    RealTimePluginInstance           *m_auditioningPlugin;
-    bool                              m_auditioningPluginBypassed;
-    Scavenger<RealTimePluginInstance> m_pluginScavenger;
-    sv_frame_t                        m_playStartFrame;
-    bool                              m_playStartFramePassed;
-    RealTime                          m_playStartedAt;
-
-    RingBuffer<float> *getWriteRingBuffer(int c) {
-	if (m_writeBuffers && c < (int)m_writeBuffers->size()) {
-	    return (*m_writeBuffers)[c];
-	} else {
-	    return 0;
-	}
-    }
-
-    RingBuffer<float> *getReadRingBuffer(int c) {
-	RingBufferVector *rb = m_readBuffers;
-	if (rb && c < (int)rb->size()) {
-	    return (*rb)[c];
-	} else {
-	    return 0;
-	}
-    }
-
-    void clearRingBuffers(bool haveLock = false, int count = 0);
-    void unifyRingBuffers();
-
-    RubberBand::RubberBandStretcher *m_timeStretcher;
-    RubberBand::RubberBandStretcher *m_monoStretcher;
-    double m_stretchRatio;
-    bool m_stretchMono;
-    
-    int m_stretcherInputCount;
-    float **m_stretcherInputs;
-    sv_frame_t *m_stretcherInputSizes;
-
-    // Called from fill thread, m_playing true, mutex held
-    // Return true if work done
-    bool fillBuffers();
-    
-    // Called from fillBuffers.  Return the number of frames written,
-    // which will be count or fewer.  Return in the frame argument the
-    // new buffered frame position (which may be earlier than the
-    // frame argument passed in, in the case of looping).
-    sv_frame_t mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers);
-
-    // Called from getSourceSamples.
-    void applyAuditioningEffect(sv_frame_t count, float **buffers);
-
-    // Ranges of current selections, if play selection is active
-    std::vector<RealTime> m_rangeStarts;
-    std::vector<RealTime> m_rangeDurations;
-    void rebuildRangeLists();
-
-    sv_frame_t getCurrentFrame(RealTime outputLatency);
-
-    class FillThread : public Thread
-    {
-    public:
-	FillThread(AudioCallbackPlaySource &source) :
-            Thread(Thread::NonRTThread),
-	    m_source(source) { }
-
-	virtual void run();
-
-    protected:
-	AudioCallbackPlaySource &m_source;
-    };
-
-    QMutex m_mutex;
-    QWaitCondition m_condition;
-    FillThread *m_fillThread;
-    SRC_STATE *m_converter;
-    SRC_STATE *m_crapConverter; // for use when playing very fast
-    int m_resampleQuality;
-    void initialiseConverter();
-};
-
-#endif
-
-
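The class comment above describes the core design: one ring buffer per channel, written by a non-realtime fill thread and read by the realtime callback. A minimal sketch of that single-producer/single-consumer hand-off (a hypothetical stand-in, not the RingBuffer in base/RingBuffer.h) could look like:

// Hypothetical single-producer/single-consumer ring buffer, written by
// the fill thread and read by the audio callback.  A stand-in to show
// the pattern only; the real class lives in base/RingBuffer.h.
#include <algorithm>
#include <atomic>
#include <vector>

class SketchRingBuffer {
public:
    explicit SketchRingBuffer(int n) : m_data(n), m_read(0), m_write(0) { }

    int getReadSpace() const {
        int r = m_read.load(), w = m_write.load(), n = int(m_data.size());
        return (w >= r) ? (w - r) : (n - (r - w));
    }
    int getWriteSpace() const {
        return int(m_data.size()) - getReadSpace() - 1;  // keep one slot free
    }
    int write(const float *src, int count) {             // fill thread only
        count = std::min(count, getWriteSpace());
        int w = m_write.load(), n = int(m_data.size());
        for (int i = 0; i < count; ++i) m_data[(w + i) % n] = src[i];
        m_write.store((w + count) % n);
        return count;
    }
    int read(float *dst, int count) {                    // audio callback only
        count = std::min(count, getReadSpace());
        int r = m_read.load(), n = int(m_data.size());
        for (int i = 0; i < count; ++i) dst[i] = m_data[(r + i) % n];
        m_read.store((r + count) % n);
        return count;
    }

private:
    std::vector<float> m_data;
    std::atomic<int> m_read, m_write;
};
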
--- a/audioio/AudioCallbackPlayTarget.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "AudioCallbackPlayTarget.h"
-#include "AudioCallbackPlaySource.h"
-
-#include <iostream>
-
-AudioCallbackPlayTarget::AudioCallbackPlayTarget(AudioCallbackPlaySource *source) :
-    m_source(source),
-    m_outputGain(1.0)
-{
-    if (m_source) {
-	connect(m_source, SIGNAL(modelReplaced()),
-		this, SLOT(sourceModelReplaced()));
-    }
-}
-
-AudioCallbackPlayTarget::~AudioCallbackPlayTarget()
-{
-}
-
-void
-AudioCallbackPlayTarget::setOutputGain(float gain)
-{
-    m_outputGain = gain;
-}
-
--- a/audioio/AudioCallbackPlayTarget.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_CALLBACK_PLAY_TARGET_H_
-#define _AUDIO_CALLBACK_PLAY_TARGET_H_
-
-#include <QObject>
-
-class AudioCallbackPlaySource;
-
-class AudioCallbackPlayTarget : public QObject
-{
-    Q_OBJECT
-
-public:
-    AudioCallbackPlayTarget(AudioCallbackPlaySource *source);
-    virtual ~AudioCallbackPlayTarget();
-
-    virtual bool isOK() const = 0;
-
-    virtual void shutdown() = 0;
-
-    virtual double getCurrentTime() const = 0;
-
-    float getOutputGain() const {
-	return m_outputGain;
-    }
-
-public slots:
-    /**
-     * Set the playback gain (0.0 = silence, 1.0 = levels unmodified)
-     */
-    virtual void setOutputGain(float gain);
-
-    /**
-     * The main source model (providing the playback sample rate) has
-     * been changed.  The target should query the source's sample
-     * rate, set its output sample rate accordingly, and call back on
-     * the source's setTargetSampleRate to indicate what sample rate
-     * it succeeded in setting at the output.  If this differs from
-     * the model rate, the source will resample.
-     */
-    virtual void sourceModelReplaced() = 0;
-
-protected:
-    AudioCallbackPlaySource *m_source;
-    float m_outputGain;
-};
-
-#endif
-
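The sourceModelReplaced() contract documented above amounts to: query the source's sample rate, open the device as close to that rate as possible, then report the achieved rate back via setTargetSampleRate() so the source can resample if necessary. A hypothetical minimal target illustrating that sequence (openDevice() is a placeholder, not a real API) might read:

// Hypothetical target showing the sourceModelReplaced() sequence
// documented above.  openDevice() is a placeholder, not a real API.
#include "AudioCallbackPlayTarget.h"
#include "AudioCallbackPlaySource.h"

class SketchPlayTarget : public AudioCallbackPlayTarget
{
public:
    SketchPlayTarget(AudioCallbackPlaySource *source) :
        AudioCallbackPlayTarget(source), m_deviceRate(0) { }

    virtual bool isOK() const { return m_deviceRate != 0; }
    virtual void shutdown() { }
    virtual double getCurrentTime() const { return 0.0; }

    virtual void sourceModelReplaced() {
        sv_samplerate_t wanted = m_source->getSourceSampleRate();
        m_deviceRate = openDevice(wanted);                // device may differ
        m_source->setTargetSampleRate(m_deviceRate);      // source resamples if so
    }

private:
    sv_samplerate_t openDevice(sv_samplerate_t preferred) {
        return preferred;  // placeholder: negotiate with the audio API here
    }
    sv_samplerate_t m_deviceRate;
};
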
--- a/audioio/AudioGenerator.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,709 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "AudioGenerator.h"
-
-#include "base/TempDirectory.h"
-#include "base/PlayParameters.h"
-#include "base/PlayParameterRepository.h"
-#include "base/Pitch.h"
-#include "base/Exceptions.h"
-
-#include "data/model/NoteModel.h"
-#include "data/model/FlexiNoteModel.h"
-#include "data/model/DenseTimeValueModel.h"
-#include "data/model/SparseTimeValueModel.h"
-#include "data/model/SparseOneDimensionalModel.h"
-#include "data/model/NoteData.h"
-
-#include "ClipMixer.h"
-#include "ContinuousSynth.h"
-
-#include <iostream>
-#include <cmath>
-
-#include <QDir>
-#include <QFile>
-
-const sv_frame_t
-AudioGenerator::m_processingBlockSize = 1024;
-
-QString
-AudioGenerator::m_sampleDir = "";
-
-//#define DEBUG_AUDIO_GENERATOR 1
-
-AudioGenerator::AudioGenerator() :
-    m_sourceSampleRate(0),
-    m_targetChannelCount(1),
-    m_waveType(0),
-    m_soloing(false),
-    m_channelBuffer(0),
-    m_channelBufSiz(0),
-    m_channelBufCount(0)
-{
-    initialiseSampleDir();
-
-    connect(PlayParameterRepository::getInstance(),
-            SIGNAL(playClipIdChanged(const Playable *, QString)),
-            this,
-            SLOT(playClipIdChanged(const Playable *, QString)));
-}
-
-AudioGenerator::~AudioGenerator()
-{
-#ifdef DEBUG_AUDIO_GENERATOR
-    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
-#endif
-}
-
-void
-AudioGenerator::initialiseSampleDir()
-{
-    if (m_sampleDir != "") return;
-
-    try {
-        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
-    } catch (DirectoryCreationFailed f) {
-        cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
-                  << " Failed to create temporary sample directory"
-                  << endl;
-        m_sampleDir = "";
-        return;
-    }
-
-    QDir sampleResourceDir(":/samples", "*.wav");
-
-    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
-
-        QString fileName(sampleResourceDir[i]);
-        QFile file(sampleResourceDir.filePath(fileName));
-        QString target = QDir(m_sampleDir).filePath(fileName);
-
-        if (!file.copy(target)) {
-            cerr << "WARNING: AudioGenerator::initialiseSampleDir: "
-                      << "Unable to copy " << fileName
-                      << " into temporary directory \""
-                      << m_sampleDir << "\"" << endl;
-        } else {
-            QFile tf(target);
-            tf.setPermissions(tf.permissions() |
-                              QFile::WriteOwner |
-                              QFile::WriteUser);
-        }
-    }
-}
-
-bool
-AudioGenerator::addModel(Model *model)
-{
-    if (m_sourceSampleRate == 0) {
-
-	m_sourceSampleRate = model->getSampleRate();
-
-    } else {
-
-	DenseTimeValueModel *dtvm =
-	    dynamic_cast<DenseTimeValueModel *>(model);
-
-	if (dtvm) {
-	    m_sourceSampleRate = model->getSampleRate();
-	    return true;
-	}
-    }
-
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return 0;
-
-    PlayParameters *parameters =
-	PlayParameterRepository::getInstance()->getPlayParameters(playable);
-
-    bool willPlay = !parameters->isPlayMuted();
-    
-    if (usesClipMixer(model)) {
-        ClipMixer *mixer = makeClipMixerFor(model);
-        if (mixer) {
-            QMutexLocker locker(&m_mutex);
-            m_clipMixerMap[model] = mixer;
-            return willPlay;
-        }
-    }
-
-    if (usesContinuousSynth(model)) {
-        ContinuousSynth *synth = makeSynthFor(model);
-        if (synth) {
-            QMutexLocker locker(&m_mutex);
-            m_continuousSynthMap[model] = synth;
-            return willPlay;
-        }
-    }
-
-    return false;
-}
-
-void
-AudioGenerator::playClipIdChanged(const Playable *playable, QString)
-{
-    const Model *model = dynamic_cast<const Model *>(playable);
-    if (!model) {
-        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
-                  << playable << " is not a supported model type"
-                  << endl;
-        return;
-    }
-
-    if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
-
-    ClipMixer *mixer = makeClipMixerFor(model);
-    if (mixer) {
-        QMutexLocker locker(&m_mutex);
-        m_clipMixerMap[model] = mixer;
-    }
-}
-
-bool
-AudioGenerator::usesClipMixer(const Model *model)
-{
-    bool clip = 
-        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
-         qobject_cast<const NoteModel *>(model) ||
-         qobject_cast<const FlexiNoteModel *>(model));
-    return clip;
-}
-
-bool
-AudioGenerator::wantsQuieterClips(const Model *model)
-{
-    // basically, anything that usually has sustain (like notes) or
-    // often has multiple sounds at once (like notes) wants to use a
-    // quieter level than simple click tracks
-    bool does = 
-        (qobject_cast<const NoteModel *>(model) ||
-         qobject_cast<const FlexiNoteModel *>(model));
-    return does;
-}
-
-bool
-AudioGenerator::usesContinuousSynth(const Model *model)
-{
-    bool cont = 
-        (qobject_cast<const SparseTimeValueModel *>(model));
-    return cont;
-}
-
-ClipMixer *
-AudioGenerator::makeClipMixerFor(const Model *model)
-{
-    QString clipId;
-
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return 0;
-
-    PlayParameters *parameters =
-	PlayParameterRepository::getInstance()->getPlayParameters(playable);
-    if (parameters) {
-        clipId = parameters->getPlayClipId();
-    }
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
-#endif
-
-    if (clipId == "") {
-        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
-        return 0;
-    }
-
-    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
-                                     m_sourceSampleRate,
-                                     m_processingBlockSize);
-
-    double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
-
-    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
-
-    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
-    if (!mixer->loadClipData(clipPath, clipF0, level)) {
-        delete mixer;
-        return 0;
-    }
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
-#endif
-
-    return mixer;
-}
-
-ContinuousSynth *
-AudioGenerator::makeSynthFor(const Model *model)
-{
-    const Playable *playable = model;
-    if (!playable || !playable->canPlay()) return 0;
-
-    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
-                                                 m_sourceSampleRate,
-                                                 m_processingBlockSize,
-                                                 m_waveType);
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
-#endif
-
-    return synth;
-}
-
-void
-AudioGenerator::removeModel(Model *model)
-{
-    SparseOneDimensionalModel *sodm =
-	dynamic_cast<SparseOneDimensionalModel *>(model);
-    if (!sodm) return; // nothing to do
-
-    QMutexLocker locker(&m_mutex);
-
-    if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
-
-    ClipMixer *mixer = m_clipMixerMap[sodm];
-    m_clipMixerMap.erase(sodm);
-    delete mixer;
-}
-
-void
-AudioGenerator::clearModels()
-{
-    QMutexLocker locker(&m_mutex);
-
-    while (!m_clipMixerMap.empty()) {
-        ClipMixer *mixer = m_clipMixerMap.begin()->second;
-	m_clipMixerMap.erase(m_clipMixerMap.begin());
-	delete mixer;
-    }
-}    
-
-void
-AudioGenerator::reset()
-{
-    QMutexLocker locker(&m_mutex);
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    cerr << "AudioGenerator::reset()" << endl;
-#endif
-
-    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
-	if (i->second) {
-	    i->second->reset();
-	}
-    }
-
-    m_noteOffs.clear();
-}
-
-void
-AudioGenerator::setTargetChannelCount(int targetChannelCount)
-{
-    if (m_targetChannelCount == targetChannelCount) return;
-
-//    SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
-
-    QMutexLocker locker(&m_mutex);
-    m_targetChannelCount = targetChannelCount;
-
-    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
-	if (i->second) i->second->setChannelCount(targetChannelCount);
-    }
-}
-
-sv_frame_t
-AudioGenerator::getBlockSize() const
-{
-    return m_processingBlockSize;
-}
-
-void
-AudioGenerator::setSoloModelSet(std::set<Model *> s)
-{
-    QMutexLocker locker(&m_mutex);
-
-    m_soloModelSet = s;
-    m_soloing = true;
-}
-
-void
-AudioGenerator::clearSoloModelSet()
-{
-    QMutexLocker locker(&m_mutex);
-
-    m_soloModelSet.clear();
-    m_soloing = false;
-}
-
-sv_frame_t
-AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
-			 float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
-{
-    if (m_sourceSampleRate == 0) {
-	cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
-	return frameCount;
-    }
-
-    QMutexLocker locker(&m_mutex);
-
-    Playable *playable = model;
-    if (!playable || !playable->canPlay()) return frameCount;
-
-    PlayParameters *parameters =
-	PlayParameterRepository::getInstance()->getPlayParameters(playable);
-    if (!parameters) return frameCount;
-
-    bool playing = !parameters->isPlayMuted();
-    if (!playing) {
-#ifdef DEBUG_AUDIO_GENERATOR
-        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
-#endif
-        return frameCount;
-    }
-
-    if (m_soloing) {
-        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
-#ifdef DEBUG_AUDIO_GENERATOR
-            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
-#endif
-            return frameCount;
-        }
-    }
-
-    float gain = parameters->getPlayGain();
-    float pan = parameters->getPlayPan();
-
-    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
-    if (dtvm) {
-	return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
-				      buffer, gain, pan, fadeIn, fadeOut);
-    }
-
-    if (usesClipMixer(model)) {
-        return mixClipModel(model, startFrame, frameCount,
-                            buffer, gain, pan);
-    }
-
-    if (usesContinuousSynth(model)) {
-        return mixContinuousSynthModel(model, startFrame, frameCount,
-                                       buffer, gain, pan);
-    }
-
-    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
-
-    return frameCount;
-}
-
-sv_frame_t
-AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
-				       sv_frame_t startFrame, sv_frame_t frames,
-				       float **buffer, float gain, float pan,
-				       sv_frame_t fadeIn, sv_frame_t fadeOut)
-{
-    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
-
-    int modelChannels = dtvm->getChannelCount();
-
-    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
-
-        for (int c = 0; c < m_channelBufCount; ++c) {
-            delete[] m_channelBuffer[c];
-        }
-
-	delete[] m_channelBuffer;
-        m_channelBuffer = new float *[modelChannels];
-
-        for (int c = 0; c < modelChannels; ++c) {
-            m_channelBuffer[c] = new float[maxFrames];
-        }
-
-        m_channelBufCount = modelChannels;
-	m_channelBufSiz = maxFrames;
-    }
-
-    sv_frame_t got = 0;
-
-    if (startFrame >= fadeIn/2) {
-        got = dtvm->getMultiChannelData(0, modelChannels - 1,
-                                        startFrame - fadeIn/2,
-                                        frames + fadeOut/2 + fadeIn/2,
-                                        m_channelBuffer);
-    } else {
-        sv_frame_t missing = fadeIn/2 - startFrame;
-
-        for (int c = 0; c < modelChannels; ++c) {
-            m_channelBuffer[c] += missing;
-        }
-
-        if (missing > 0) {
-            cerr << "note: channelBufSiz = " << m_channelBufSiz
-                 << ", frames + fadeOut/2 = " << frames + fadeOut/2 
-                 << ", startFrame = " << startFrame 
-                 << ", missing = " << missing << endl;
-        }
-
-        got = dtvm->getMultiChannelData(0, modelChannels - 1,
-                                        startFrame,
-                                        frames + fadeOut/2,
-                                        m_channelBuffer);
-
-        for (int c = 0; c < modelChannels; ++c) {
-            m_channelBuffer[c] -= missing;
-        }
-
-        got += missing;
-    }	    
-
-    for (int c = 0; c < m_targetChannelCount; ++c) {
-
-	int sourceChannel = (c % modelChannels);
-
-//	SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
-
-	float channelGain = gain;
-	if (pan != 0.0) {
-	    if (c == 0) {
-		if (pan > 0.0) channelGain *= 1.0f - pan;
-	    } else {
-		if (pan < 0.0) channelGain *= pan + 1.0f;
-	    }
-	}
-
-	for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
-	    float *back = buffer[c];
-	    back -= fadeIn/2;
-	    back[i] +=
-                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
-                / float(fadeIn);
-	}
-
-	for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
-	    float mult = channelGain;
-	    if (i < fadeIn/2) {
-		mult = (mult * float(i)) / float(fadeIn);
-	    }
-	    if (i > frames - fadeOut/2) {
-		mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
-	    }
-            float val = m_channelBuffer[sourceChannel][i];
-            if (i >= got) val = 0.f;
-	    buffer[c][i] += mult * val;
-	}
-    }
-
-    return got;
-}
-  
-sv_frame_t
-AudioGenerator::mixClipModel(Model *model,
-                             sv_frame_t startFrame, sv_frame_t frames,
-                             float **buffer, float gain, float pan)
-{
-    ClipMixer *clipMixer = m_clipMixerMap[model];
-    if (!clipMixer) return 0;
-
-    int blocks = int(frames / m_processingBlockSize);
-    
-    //!!! todo: the below -- it matters
-
-    //!!! hang on -- the fact that the audio callback play source's
-    //buffer is a multiple of the plugin's buffer size doesn't mean
-    //that we always get called for a multiple of it here (because it
-    //also depends on the JACK block size).  how should we ensure that
-    //all models write the same amount in to the mix, and that we
-    //always have a multiple of the plugin buffer size?  I guess this
-    //class has to be queryable for the plugin buffer size & the
-    //callback play source has to use that as a multiple for all the
-    //calls to mixModel
-
-    sv_frame_t got = blocks * m_processingBlockSize;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
-         << ", blocks " << blocks << ", have " << m_noteOffs.size()
-         << " note-offs" << endl;
-#endif
-
-    ClipMixer::NoteStart on;
-    ClipMixer::NoteEnd off;
-
-    NoteOffSet &noteOffs = m_noteOffs[model];
-
-    float **bufferIndexes = new float *[m_targetChannelCount];
-
-    for (int i = 0; i < blocks; ++i) {
-
-	sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
-
-        NoteList notes;
-        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
-        if (exportable) {
-            notes = exportable->getNotesWithin(reqStart,
-                                               reqStart + m_processingBlockSize);
-        }
-
-        std::vector<ClipMixer::NoteStart> starts;
-        std::vector<ClipMixer::NoteEnd> ends;
-
-	for (NoteList::const_iterator ni = notes.begin();
-             ni != notes.end(); ++ni) {
-
-	    sv_frame_t noteFrame = ni->start;
-
-	    if (noteFrame < reqStart ||
-		noteFrame >= reqStart + m_processingBlockSize) continue;
-
-	    while (noteOffs.begin() != noteOffs.end() &&
-		   noteOffs.begin()->frame <= noteFrame) {
-
-                sv_frame_t eventFrame = noteOffs.begin()->frame;
-                if (eventFrame < reqStart) eventFrame = reqStart;
-
-                off.frameOffset = eventFrame - reqStart;
-                off.frequency = noteOffs.begin()->frequency;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-		cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
-#endif
-
-                ends.push_back(off);
-		noteOffs.erase(noteOffs.begin());
-	    }
-
-            on.frameOffset = noteFrame - reqStart;
-            on.frequency = ni->getFrequency();
-            on.level = float(ni->velocity) / 127.0f;
-            on.pan = pan;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-	    cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
-#endif
-	    
-            starts.push_back(on);
-	    noteOffs.insert
-                (NoteOff(on.frequency, noteFrame + ni->duration));
-	}
-
-	while (noteOffs.begin() != noteOffs.end() &&
-	       noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
-
-            sv_frame_t eventFrame = noteOffs.begin()->frame;
-            if (eventFrame < reqStart) eventFrame = reqStart;
-
-            off.frameOffset = eventFrame - reqStart;
-            off.frequency = noteOffs.begin()->frequency;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
-#endif
-
-            ends.push_back(off);
-            noteOffs.erase(noteOffs.begin());
-	}
-
-	for (int c = 0; c < m_targetChannelCount; ++c) {
-            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
-        }
-
-        clipMixer->mix(bufferIndexes, gain, starts, ends);
-    }
-
-    delete[] bufferIndexes;
-
-    return got;
-}
-
-sv_frame_t
-AudioGenerator::mixContinuousSynthModel(Model *model,
-                                        sv_frame_t startFrame,
-                                        sv_frame_t frames,
-                                        float **buffer,
-                                        float gain, 
-                                        float pan)
-{
-    ContinuousSynth *synth = m_continuousSynthMap[model];
-    if (!synth) return 0;
-
-    // only type we support here at the moment
-    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
-    if (!stvm || stvm->getScaleUnits() != "Hz") return 0;
-
-    int blocks = int(frames / m_processingBlockSize);
-
-    //!!! todo: see comment in mixClipModel
-
-    sv_frame_t got = blocks * m_processingBlockSize;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    cout << "mixModel [synth]: frames " << frames
-	      << ", blocks " << blocks << endl;
-#endif
-    
-    float **bufferIndexes = new float *[m_targetChannelCount];
-
-    for (int i = 0; i < blocks; ++i) {
-
-	sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
-
-	for (int c = 0; c < m_targetChannelCount; ++c) {
-            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
-        }
-
-        SparseTimeValueModel::PointList points = 
-            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
-
-        // by default, repeat last frequency
-        float f0 = 0.f;
-
-        // go straight to the last freq that is genuinely in this range
-        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
-             itr != points.begin(); ) {
-            --itr;
-            if (itr->frame >= reqStart &&
-                itr->frame < reqStart + m_processingBlockSize) {
-                f0 = itr->value;
-                break;
-            }
-        }
-
-        // if we found no such frequency and the next point is further
-        // away than twice the model resolution, go silent (same
-        // criterion TimeValueLayer uses for ending a discrete curve
-        // segment)
-        if (f0 == 0.f) {
-            SparseTimeValueModel::PointList nextPoints = 
-                stvm->getNextPoints(reqStart + m_processingBlockSize);
-            if (nextPoints.empty() ||
-                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
-                f0 = -1.f;
-            }
-        }
-
-//        cerr << "f0 = " << f0 << endl;
-
-        synth->mix(bufferIndexes,
-                   gain,
-                   pan,
-                   f0);
-    }
-
-    delete[] bufferIndexes;
-
-    return got;
-}
-
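One detail from mixDenseTimeValueModel() above that is easy to miss: pan is applied as a simple attenuation law, leaving the favoured channel at unity gain and attenuating the other linearly. Restated as a standalone helper (illustrative only, assuming channel 0 is left and pan lies in [-1, 1]):

// Restating the pan handling from mixDenseTimeValueModel() above,
// assuming channel 0 is left and pan lies in [-1, 1].
float channelGainFor(float gain, float pan, int channel)
{
    float g = gain;
    if (pan != 0.f) {
        if (channel == 0) {
            if (pan > 0.f) g *= 1.f - pan;    // panned right: attenuate left
        } else {
            if (pan < 0.f) g *= pan + 1.f;    // panned left: attenuate right
        }
    }
    return g;
}
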
--- a/audioio/AudioGenerator.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,168 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_GENERATOR_H_
-#define _AUDIO_GENERATOR_H_
-
-class Model;
-class NoteModel;
-class FlexiNoteModel;
-class DenseTimeValueModel;
-class SparseOneDimensionalModel;
-class Playable;
-class ClipMixer;
-class ContinuousSynth;
-
-#include <QObject>
-#include <QMutex>
-
-#include <set>
-#include <map>
-#include <vector>
-
-#include "base/BaseTypes.h"
-
-class AudioGenerator : public QObject
-{
-    Q_OBJECT
-
-public:
-    AudioGenerator();
-    virtual ~AudioGenerator();
-
-    /**
-     * Add a data model to be played from and initialise any necessary
-     * audio generation code.  Returns true if the model will be
-     * played.  The model will be added regardless of the return
-     * value.
-     */
-    virtual bool addModel(Model *model);
-
-    /**
-     * Remove a model.
-     */
-    virtual void removeModel(Model *model);
-
-    /**
-     * Remove all models.
-     */
-    virtual void clearModels();
-
-    /**
-     * Reset playback, clearing buffers and the like.
-     */
-    virtual void reset();
-
-    /**
-     * Set the target channel count.  The buffer parameter to mixModel
-     * must always point to at least this number of arrays.
-     */
-    virtual void setTargetChannelCount(int channelCount);
-
-    /**
-     * Return the internal processing block size.  The frameCount
-     * argument to all mixModel calls must be a multiple of this
-     * value.
-     */
-    virtual sv_frame_t getBlockSize() const;
-
-    /**
-     * Mix a single model into an output buffer.
-     */
-    virtual sv_frame_t mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
-			    float **buffer, sv_frame_t fadeIn = 0, sv_frame_t fadeOut = 0);
-
-    /**
-     * Specify that only the given set of models should be played.
-     */
-    virtual void setSoloModelSet(std::set<Model *>s);
-
-    /**
-     * Specify that all models should be played as normal (if not
-     * muted).
-     */
-    virtual void clearSoloModelSet();
-
-protected slots:
-    void playClipIdChanged(const Playable *, QString);
-
-protected:
-    sv_samplerate_t m_sourceSampleRate;
-    int m_targetChannelCount;
-    int m_waveType;
-
-    bool m_soloing;
-    std::set<Model *> m_soloModelSet;
-
-    struct NoteOff {
-
-        NoteOff(float _freq, sv_frame_t _frame) : frequency(_freq), frame(_frame) { }
-
-        float frequency;
-	sv_frame_t frame;
-
-	struct Comparator {
-	    bool operator()(const NoteOff &n1, const NoteOff &n2) const {
-		return n1.frame < n2.frame;
-	    }
-	};
-    };
-
-
-    typedef std::map<const Model *, ClipMixer *> ClipMixerMap;
-
-    typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet;
-    typedef std::map<const Model *, NoteOffSet> NoteOffMap;
-
-    typedef std::map<const Model *, ContinuousSynth *> ContinuousSynthMap;
-
-    QMutex m_mutex;
-
-    ClipMixerMap m_clipMixerMap;
-    NoteOffMap m_noteOffs;
-    static QString m_sampleDir;
-
-    ContinuousSynthMap m_continuousSynthMap;
-
-    bool usesClipMixer(const Model *);
-    bool wantsQuieterClips(const Model *);
-    bool usesContinuousSynth(const Model *);
-
-    ClipMixer *makeClipMixerFor(const Model *model);
-    ContinuousSynth *makeSynthFor(const Model *model);
-
-    static void initialiseSampleDir();
-
-    virtual sv_frame_t mixDenseTimeValueModel
-    (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount,
-     float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut);
-
-    virtual sv_frame_t mixClipModel
-    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
-     float **buffer, float gain, float pan);
-
-    virtual sv_frame_t mixContinuousSynthModel
-    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
-     float **buffer, float gain, float pan);
-    
-    static const sv_frame_t m_processingBlockSize;
-
-    float **m_channelBuffer;
-    sv_frame_t m_channelBufSiz;
-    int m_channelBufCount;
-};
-
-#endif
-
--- a/audioio/AudioJACKTarget.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,487 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifdef HAVE_JACK
-
-#include "AudioJACKTarget.h"
-#include "AudioCallbackPlaySource.h"
-
-#include <iostream>
-#include <cmath>
-
-#include <alloca.h>
-
-//#define DEBUG_AUDIO_JACK_TARGET 1
-
-#ifdef BUILD_STATIC
-#ifdef Q_OS_LINUX
-
-// Some lunacy to enable JACK support in static builds.  JACK isn't
-// supposed to be linked statically, because it depends on a
-// consistent shared memory layout between client library and daemon,
-// so it's very fragile in the face of version mismatches.
-//
-// Therefore for static builds on Linux we avoid linking against JACK
-// at all during the build, instead using dlopen and runtime symbol
-// lookup to switch on JACK support at runtime.  The following big
-// mess (down to the #endifs) is the code that implements this.
-
-static void *symbol(const char *name)
-{
-    static bool attempted = false;
-    static void *library = 0;
-    static std::map<const char *, void *> symbols;
-    if (symbols.find(name) != symbols.end()) return symbols[name];
-    if (!library) {
-        if (!attempted) {
-            library = ::dlopen("libjack.so.1", RTLD_NOW);
-            if (!library) library = ::dlopen("libjack.so.0", RTLD_NOW);
-            if (!library) library = ::dlopen("libjack.so", RTLD_NOW);
-            if (!library) {
-                cerr << "WARNING: AudioJACKTarget: Failed to load JACK library: "
-                          << ::dlerror() << " (tried .so, .so.0, .so.1)"
-                          << endl;
-            }
-            attempted = true;
-        }
-        if (!library) return 0;
-    }
-    void *symbol = ::dlsym(library, name);
-    if (!symbol) {
-        cerr << "WARNING: AudioJACKTarget: Failed to locate symbol "
-                  << name << ": " << ::dlerror() << endl;
-    }
-    symbols[name] = symbol;
-    return symbol;
-}
-
-static jack_client_t *dynamic_jack_client_open(const char *client_name,
-                                               jack_options_t options,
-                                               jack_status_t *status, ...)
-{
-    typedef jack_client_t *(*func)(const char *client_name,
-                                   jack_options_t options,
-                                   jack_status_t *status, ...);
-    void *s = symbol("jack_client_open");
-    if (!s) return 0;
-    func f = (func)s;
-    return f(client_name, options, status); // varargs not supported here
-}
-
-static int dynamic_jack_set_process_callback(jack_client_t *client,
-                                             JackProcessCallback process_callback,
-                                             void *arg)
-{
-    typedef int (*func)(jack_client_t *client,
-                        JackProcessCallback process_callback,
-                        void *arg);
-    void *s = symbol("jack_set_process_callback");
-    if (!s) return 1;
-    func f = (func)s;
-    return f(client, process_callback, arg);
-}
-
-static int dynamic_jack_set_xrun_callback(jack_client_t *client,
-                                          JackXRunCallback xrun_callback,
-                                          void *arg)
-{
-    typedef int (*func)(jack_client_t *client,
-                        JackXRunCallback xrun_callback,
-                        void *arg);
-    void *s = symbol("jack_set_xrun_callback");
-    if (!s) return 1;
-    func f = (func)s;
-    return f(client, xrun_callback, arg);
-}
-
-static const char **dynamic_jack_get_ports(jack_client_t *client, 
-                                           const char *port_name_pattern, 
-                                           const char *type_name_pattern, 
-                                           unsigned long flags)
-{
-    typedef const char **(*func)(jack_client_t *client, 
-                                 const char *port_name_pattern, 
-                                 const char *type_name_pattern, 
-                                 unsigned long flags);
-    void *s = symbol("jack_get_ports");
-    if (!s) return 0;
-    func f = (func)s;
-    return f(client, port_name_pattern, type_name_pattern, flags);
-}
-
-static jack_port_t *dynamic_jack_port_register(jack_client_t *client,
-                                               const char *port_name,
-                                               const char *port_type,
-                                               unsigned long flags,
-                                               unsigned long buffer_size)
-{
-    typedef jack_port_t *(*func)(jack_client_t *client,
-                                 const char *port_name,
-                                 const char *port_type,
-                                 unsigned long flags,
-                                 unsigned long buffer_size);
-    void *s = symbol("jack_port_register");
-    if (!s) return 0;
-    func f = (func)s;
-    return f(client, port_name, port_type, flags, buffer_size);
-}
-
-static int dynamic_jack_connect(jack_client_t *client,
-                                const char *source,
-                                const char *dest)
-{
-    typedef int (*func)(jack_client_t *client,
-                        const char *source,
-                        const char *dest);
-    void *s = symbol("jack_connect");
-    if (!s) return 1;
-    func f = (func)s;
-    return f(client, source, dest);
-}
-
-static void *dynamic_jack_port_get_buffer(jack_port_t *port,
-                                          jack_nframes_t sz)
-{
-    typedef void *(*func)(jack_port_t *, jack_nframes_t);
-    void *s = symbol("jack_port_get_buffer");
-    if (!s) return 0;
-    func f = (func)s;
-    return f(port, sz);
-}
-
-static int dynamic_jack_port_unregister(jack_client_t *client,
-                                        jack_port_t *port)
-{
-    typedef int(*func)(jack_client_t *, jack_port_t *);
-    void *s = symbol("jack_port_unregister");
-    if (!s) return 0;
-    func f = (func)s;
-    return f(client, port);
-}
-
-static void dynamic_jack_port_get_latency_range(jack_port_t *port,
-                                                jack_latency_callback_mode_t mode,
-                                                jack_latency_range_t *range)
-{
-    typedef void (*func)(jack_port_t *, jack_latency_callback_mode_t, jack_latency_range_t *);
-    void *s = symbol("jack_port_get_latency_range");
-    if (!s) {
-        range->min = range->max = 0;
-        return;
-    }
-    func f = (func)s;
-    f(port, mode, range);
-}
-
-#define dynamic1(rv, name, argtype, failval) \
-    static rv dynamic_##name(argtype arg) { \
-        typedef rv (*func) (argtype); \
-        void *s = symbol(#name); \
-        if (!s) return failval; \
-        func f = (func) s; \
-        return f(arg); \
-    }
-
-dynamic1(jack_client_t *, jack_client_new, const char *, 0);
-dynamic1(jack_nframes_t, jack_get_buffer_size, jack_client_t *, 0);
-dynamic1(jack_nframes_t, jack_get_sample_rate, jack_client_t *, 0);
-dynamic1(int, jack_activate, jack_client_t *, 1);
-dynamic1(int, jack_deactivate, jack_client_t *, 1);
-dynamic1(int, jack_client_close, jack_client_t *, 1);
-dynamic1(jack_nframes_t, jack_frame_time, jack_client_t *, 0);
-dynamic1(const char *, jack_port_name, const jack_port_t *, 0);
-
-#define jack_client_new dynamic_jack_client_new
-#define jack_client_open dynamic_jack_client_open
-#define jack_get_buffer_size dynamic_jack_get_buffer_size
-#define jack_get_sample_rate dynamic_jack_get_sample_rate
-#define jack_set_process_callback dynamic_jack_set_process_callback
-#define jack_set_xrun_callback dynamic_jack_set_xrun_callback
-#define jack_activate dynamic_jack_activate
-#define jack_deactivate dynamic_jack_deactivate
-#define jack_client_close dynamic_jack_client_close
-#define jack_frame_time dynamic_jack_frame_time
-#define jack_get_ports dynamic_jack_get_ports
-#define jack_port_register dynamic_jack_port_register
-#define jack_port_unregister dynamic_jack_port_unregister
-#define jack_port_name dynamic_jack_port_name
-#define jack_connect dynamic_jack_connect
-#define jack_port_get_buffer dynamic_jack_port_get_buffer
-
-#endif
-#endif
-
-AudioJACKTarget::AudioJACKTarget(AudioCallbackPlaySource *source) :
-    AudioCallbackPlayTarget(source),
-    m_client(0),
-    m_bufferSize(0),
-    m_sampleRate(0),
-    m_done(false)
-{
-    JackOptions options = JackNullOption;
-#ifdef HAVE_PORTAUDIO_2_0
-    options = JackNoStartServer;
-#endif
-#ifdef HAVE_LIBPULSE
-    options = JackNoStartServer;
-#endif
-
-    JackStatus status = JackStatus(0);
-    m_client = jack_client_open(source->getClientName().toLocal8Bit().data(),
-                                options, &status);
-    
-    if (!m_client) {
-        cerr << "AudioJACKTarget: Failed to connect to JACK server: status code "
-                  << status << endl;
-        return;
-    }
-
-    m_bufferSize = jack_get_buffer_size(m_client);
-    m_sampleRate = jack_get_sample_rate(m_client);
-
-    jack_set_xrun_callback(m_client, xrunStatic, this);
-    jack_set_process_callback(m_client, processStatic, this);
-
-    if (jack_activate(m_client)) {
-	cerr << "ERROR: AudioJACKTarget: Failed to activate JACK client"
-		  << endl;
-    }
-
-    if (m_source) {
-	sourceModelReplaced();
-    }
-    
-    // Mainstream JACK (though not jackdmp) calls mlockall() to lock
-    // down all memory for real-time operation.  That isn't a terribly
-    // good idea in an application like this that may have very high
-    // dynamic memory usage in other threads, as mlockall() applies
-    // across all threads.  We're far better off undoing it here and
-    // accepting the possible loss of true RT capability.
-    MUNLOCKALL();
-}
-
-AudioJACKTarget::~AudioJACKTarget()
-{
-    SVDEBUG << "AudioJACKTarget::~AudioJACKTarget()" << endl;
-
-    if (m_source) {
-        m_source->setTarget(0, m_bufferSize);
-    }
-
-    shutdown();
-
-    if (m_client) {
-
-        while (m_outputs.size() > 0) {
-            std::vector<jack_port_t *>::iterator itr = m_outputs.end();
-            --itr;
-            jack_port_t *port = *itr;
-            cerr << "unregister " << m_outputs.size() << endl;
-            if (port) jack_port_unregister(m_client, port);
-            m_outputs.erase(itr);
-        }
-        cerr << "Deactivating... ";
-	jack_deactivate(m_client);
-        cerr << "done\nClosing... ";
-	jack_client_close(m_client);
-        cerr << "done" << endl;
-    }
-
-    m_client = 0;
-
-    SVDEBUG << "AudioJACKTarget::~AudioJACKTarget() done" << endl;
-}
-
-void
-AudioJACKTarget::shutdown()
-{
-    m_done = true;
-}
-
-bool
-AudioJACKTarget::isOK() const
-{
-    return (m_client != 0);
-}
-
-double
-AudioJACKTarget::getCurrentTime() const
-{
-    if (m_client && m_sampleRate) {
-        return double(jack_frame_time(m_client)) / double(m_sampleRate);
-    } else {
-        return 0.0;
-    }
-}
-
-int
-AudioJACKTarget::processStatic(jack_nframes_t nframes, void *arg)
-{
-    return ((AudioJACKTarget *)arg)->process(nframes);
-}
-
-int
-AudioJACKTarget::xrunStatic(void *arg)
-{
-    return ((AudioJACKTarget *)arg)->xrun();
-}
-
-void
-AudioJACKTarget::sourceModelReplaced()
-{
-    m_mutex.lock();
-
-    m_source->setTarget(this, m_bufferSize);
-    m_source->setTargetSampleRate(m_sampleRate);
-
-    int channels = m_source->getSourceChannelCount();
-
-    // Because we offer pan, we always want at least 2 channels
-    if (channels < 2) channels = 2;
-
-    if (channels == (int)m_outputs.size() || !m_client) {
-	m_mutex.unlock();
-	return;
-    }
-
-    const char **ports =
-	jack_get_ports(m_client, NULL, NULL,
-		       JackPortIsPhysical | JackPortIsInput);
-    int physicalPortCount = 0;
-    while (ports[physicalPortCount]) ++physicalPortCount;
-
-#ifdef DEBUG_AUDIO_JACK_TARGET    
-    SVDEBUG << "AudioJACKTarget::sourceModelReplaced: have " << channels << " channels and " << physicalPortCount << " physical ports" << endl;
-#endif
-
-    while ((int)m_outputs.size() < channels) {
-
-        const int namelen = 30;
-	char name[namelen];
-	jack_port_t *port;
-
-	snprintf(name, namelen, "out %d", int(m_outputs.size() + 1));
-
-	port = jack_port_register(m_client,
-				  name,
-				  JACK_DEFAULT_AUDIO_TYPE,
-				  JackPortIsOutput,
-				  0);
-
-	if (!port) {
-	    cerr
-		<< "ERROR: AudioJACKTarget: Failed to create JACK output port "
-		<< m_outputs.size() << endl;
-	} else {
-            jack_latency_range_t range;
-            jack_port_get_latency_range(port, JackPlaybackLatency, &range);
-	    m_source->setTargetPlayLatency(range.max);
-            cerr << "AudioJACKTarget: output latency is " << range.max << endl;
-	}
-
-	if ((int)m_outputs.size() < physicalPortCount) {
-	    jack_connect(m_client, jack_port_name(port), ports[m_outputs.size()]);
-	}
-
-	m_outputs.push_back(port);
-    }
-
-    while ((int)m_outputs.size() > channels) {
-	std::vector<jack_port_t *>::iterator itr = m_outputs.end();
-	--itr;
-	jack_port_t *port = *itr;
-	if (port) jack_port_unregister(m_client, port);
-	m_outputs.erase(itr);
-    }
-
-    m_mutex.unlock();
-}
-
-int
-AudioJACKTarget::process(jack_nframes_t nframes)
-{
-    if (m_done) return 0;
-
-    if (!m_mutex.tryLock()) {
-	return 0;
-    }
-
-    if (m_outputs.empty()) {
-	m_mutex.unlock();
-	return 0;
-    }
-
-#ifdef DEBUG_AUDIO_JACK_TARGET    
-    cout << "AudioJACKTarget::process(" << nframes << "): have a source" << endl;
-#endif
-
-#ifdef DEBUG_AUDIO_JACK_TARGET    
-    if (m_bufferSize != nframes) {
-	cerr << "WARNING: m_bufferSize != nframes (" << m_bufferSize << " != " << nframes << ")" << endl;
-    }
-#endif
-
-    float **buffers = (float **)alloca(m_outputs.size() * sizeof(float *));
-
-    for (int ch = 0; ch < (int)m_outputs.size(); ++ch) {
-	buffers[ch] = (float *)jack_port_get_buffer(m_outputs[ch], nframes);
-    }
-
-    sv_frame_t received = 0;
-
-    if (m_source) {
-	received = m_source->getSourceSamples(nframes, buffers);
-    }
-
-    for (int ch = 0; ch < (int)m_outputs.size(); ++ch) {
-        for (sv_frame_t i = received; i < nframes; ++i) {
-            buffers[ch][i] = 0.0;
-        }
-    }
-
-    float peakLeft = 0.0, peakRight = 0.0;
-
-    for (int ch = 0; ch < (int)m_outputs.size(); ++ch) {
-
-	float peak = 0.0;
-
-	for (int i = 0; i < (int)nframes; ++i) {
-	    buffers[ch][i] *= m_outputGain;
-	    float sample = fabsf(buffers[ch][i]);
-	    if (sample > peak) peak = sample;
-	}
-
-	if (ch == 0) peakLeft = peak;
-	if (ch > 0 || m_outputs.size() == 1) peakRight = peak;
-    }
-	    
-    if (m_source) {
-	m_source->setOutputLevels(peakLeft, peakRight);
-    }
-
-    m_mutex.unlock();
-    return 0;
-}
-
-int
-AudioJACKTarget::xrun()
-{
-    cerr << "AudioJACKTarget: xrun!" << endl;
-    if (m_source) m_source->audioProcessingOverload();
-    return 0;
-}
-
-#endif /* HAVE_JACK */
-
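Note on the removed JACK target: AudioJACKTarget.cpp binds its JACK entry points at run time through the symbol() lookup and the dynamic1 macro above, so a build can still start on machines without libjack installed. The definition of symbol() is not part of this hunk; the sketch below assumes a dlopen()/dlsym() based lookup (library soname and helper names are illustrative, not the project's), and shows the same fail-soft wrapper shape for a single JACK call.

#include <dlfcn.h>
#include <cstdio>

typedef struct _jack_client jack_client_t;  // opaque handle, mirroring <jack/jack.h>
typedef unsigned int jack_nframes_t;        // stands in for JACK's 32-bit frame count

static void *getJackSymbol(const char *name)
{
    // One-off load of the client library (Linux-style soname assumed here);
    // returns null when JACK is not installed so callers can degrade cleanly.
    static void *lib = dlopen("libjack.so.0", RTLD_NOW | RTLD_GLOBAL);
    if (!lib) return nullptr;
    return dlsym(lib, name);
}

static jack_nframes_t dynamic_jack_get_sample_rate(jack_client_t *client)
{
    typedef jack_nframes_t (*func)(jack_client_t *);
    void *s = getJackSymbol("jack_get_sample_rate");
    if (!s) return 0;                       // fail soft, as the wrappers above do
    return ((func)s)(client);
}

int main()
{
    // Demonstrates the lookup only; a real caller would first obtain a client
    // from jack_client_open() and pass it to the wrapper. Link with -ldl where
    // the platform requires it.
    void *s = getJackSymbol("jack_get_sample_rate");
    std::printf("jack_get_sample_rate %s; wrapper at %p\n",
                s ? "resolvable" : "not available",
                reinterpret_cast<void *>(&dynamic_jack_get_sample_rate));
    return 0;
}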
--- a/audioio/AudioJACKTarget.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_JACK_TARGET_H_
-#define _AUDIO_JACK_TARGET_H_
-
-#ifdef HAVE_JACK
-
-#include <jack/jack.h>
-#include <vector>
-
-#include "AudioCallbackPlayTarget.h"
-
-#include <QMutex>
-
-class AudioCallbackPlaySource;
-
-class AudioJACKTarget : public AudioCallbackPlayTarget
-{
-    Q_OBJECT
-
-public:
-    AudioJACKTarget(AudioCallbackPlaySource *source);
-    virtual ~AudioJACKTarget();
-
-    virtual void shutdown();
-
-    virtual bool isOK() const;
-
-    virtual double getCurrentTime() const;
-
-public slots:
-    virtual void sourceModelReplaced();
-
-protected:
-    int process(jack_nframes_t nframes);
-    int xrun();
-
-    static int processStatic(jack_nframes_t, void *);
-    static int xrunStatic(void *);
-
-    jack_client_t              *m_client;
-    std::vector<jack_port_t *>  m_outputs;
-    jack_nframes_t              m_bufferSize;
-    jack_nframes_t              m_sampleRate;
-    QMutex                      m_mutex;
-    bool                        m_done;
-};
-
-#endif /* HAVE_JACK */
-
-#endif
-
--- a/audioio/AudioPortAudioTarget.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,300 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifdef HAVE_PORTAUDIO_2_0
-
-#include "AudioPortAudioTarget.h"
-#include "AudioCallbackPlaySource.h"
-
-#include <iostream>
-#include <cassert>
-#include <cmath>
-
-#ifndef _WIN32
-#include <pthread.h>
-#endif
-
-//#define DEBUG_AUDIO_PORT_AUDIO_TARGET 1
-
-AudioPortAudioTarget::AudioPortAudioTarget(AudioCallbackPlaySource *source) :
-    AudioCallbackPlayTarget(source),
-    m_stream(0),
-    m_bufferSize(0),
-    m_sampleRate(0),
-    m_latency(0),
-    m_prioritySet(false),
-    m_done(false)
-{
-    PaError err;
-
-#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET
-    cerr << "AudioPortAudioTarget: Initialising for PortAudio v19" << endl;
-#endif
-
-    err = Pa_Initialize();
-    if (err != paNoError) {
-	cerr << "ERROR: AudioPortAudioTarget: Failed to initialize PortAudio: " << Pa_GetErrorText(err) << endl;
-	return;
-    }
-
-    m_bufferSize = 2048;
-    m_sampleRate = 44100;
-    if (m_source && (m_source->getSourceSampleRate() != 0)) {
-	m_sampleRate = int(m_source->getSourceSampleRate());
-    }
-
-    PaStreamParameters op;
-    op.device = Pa_GetDefaultOutputDevice();
-    op.channelCount = 2;
-    op.sampleFormat = paFloat32;
-    op.suggestedLatency = 0.2;
-    op.hostApiSpecificStreamInfo = 0;
-    err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate,
-                        paFramesPerBufferUnspecified,
-                        paNoFlag, processStatic, this);
-
-    if (err != paNoError) {
-
-        cerr << "WARNING: AudioPortAudioTarget: Failed to open PortAudio stream with default frames per buffer, trying again with fixed frames per buffer..." << endl;
-        
-        err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate,
-                            1024,
-                            paNoFlag, processStatic, this);
-	m_bufferSize = 1024;
-    }
-
-    if (err != paNoError) {
-	cerr << "ERROR: AudioPortAudioTarget: Failed to open PortAudio stream: " << Pa_GetErrorText(err) << endl;
-        cerr << "Note: device ID was " << op.device << endl;
-	m_stream = 0;
-	Pa_Terminate();
-	return;
-    }
-
-    const PaStreamInfo *info = Pa_GetStreamInfo(m_stream);
-    m_latency = int(info->outputLatency * m_sampleRate + 0.001);
-    if (m_bufferSize < m_latency) m_bufferSize = m_latency;
-
-    cerr << "PortAudio latency = " << m_latency << " frames" << endl;
-
-    err = Pa_StartStream(m_stream);
-
-    if (err != paNoError) {
-	cerr << "ERROR: AudioPortAudioTarget: Failed to start PortAudio stream: " << Pa_GetErrorText(err) << endl;
-	Pa_CloseStream(m_stream);
-	m_stream = 0;
-	Pa_Terminate();
-	return;
-    }
-
-    if (m_source) {
-	cerr << "AudioPortAudioTarget: block size " << m_bufferSize << endl;
-	m_source->setTarget(this, m_bufferSize);
-	m_source->setTargetSampleRate(m_sampleRate);
-	m_source->setTargetPlayLatency(m_latency);
-    }
-
-#ifdef DEBUG_PORT_AUDIO_TARGET
-    cerr << "AudioPortAudioTarget: initialised OK" << endl;
-#endif
-}
-
-AudioPortAudioTarget::~AudioPortAudioTarget()
-{
-    SVDEBUG << "AudioPortAudioTarget::~AudioPortAudioTarget()" << endl;
-
-    if (m_source) {
-        m_source->setTarget(0, m_bufferSize);
-    }
-
-    shutdown();
-
-    if (m_stream) {
-
-        SVDEBUG << "closing stream" << endl;
-
-	PaError err;
-	err = Pa_CloseStream(m_stream);
-	if (err != paNoError) {
-	    cerr << "ERROR: AudioPortAudioTarget: Failed to close PortAudio stream: " << Pa_GetErrorText(err) << endl;
-	}
-
-        cerr << "terminating" << endl;
-
-	err = Pa_Terminate();
-        if (err != paNoError) {
-            cerr << "ERROR: AudioPortAudioTarget: Failed to terminate PortAudio: " << Pa_GetErrorText(err) << endl;
-	}   
-    }
-
-    m_stream = 0;
-
-    SVDEBUG << "AudioPortAudioTarget::~AudioPortAudioTarget() done" << endl;
-}
-
-void 
-AudioPortAudioTarget::shutdown()
-{
-#ifdef DEBUG_PORT_AUDIO_TARGET
-    SVDEBUG << "AudioPortAudioTarget::shutdown" << endl;
-#endif
-    m_done = true;
-}
-
-bool
-AudioPortAudioTarget::isOK() const
-{
-    return (m_stream != 0);
-}
-
-double
-AudioPortAudioTarget::getCurrentTime() const
-{
-    if (!m_stream) return 0.0;
-    else return Pa_GetStreamTime(m_stream);
-}
-
-int
-AudioPortAudioTarget::processStatic(const void *input, void *output,
-                                    unsigned long nframes,
-                                    const PaStreamCallbackTimeInfo *timeInfo,
-                                    PaStreamCallbackFlags flags, void *data)
-{
-    return ((AudioPortAudioTarget *)data)->process(input, output,
-                                                   nframes, timeInfo,
-                                                   flags);
-}
-
-void
-AudioPortAudioTarget::sourceModelReplaced()
-{
-    m_source->setTargetSampleRate(m_sampleRate);
-}
-
-int
-AudioPortAudioTarget::process(const void *, void *outputBuffer,
-                              sv_frame_t nframes,
-                              const PaStreamCallbackTimeInfo *,
-                              PaStreamCallbackFlags)
-{
-#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET    
-    SVDEBUG << "AudioPortAudioTarget::process(" << nframes << ")" << endl;
-#endif
-
-    if (!m_source || m_done) {
-#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET
-        SVDEBUG << "AudioPortAudioTarget::process: Doing nothing, no source or application done" << endl;
-#endif
-        return 0;
-    }
-
-    if (!m_prioritySet) {
-#ifndef _WIN32
-        sched_param param;
-        param.sched_priority = 20;
-        if (pthread_setschedparam(pthread_self(), SCHED_RR, &param)) {
-            SVDEBUG << "AudioPortAudioTarget: NOTE: couldn't set RT scheduling class" << endl;
-        } else {
-            SVDEBUG << "AudioPortAudioTarget: NOTE: successfully set RT scheduling class" << endl;
-        }
-#endif
-        m_prioritySet = true;
-    }
-
-    float *output = (float *)outputBuffer;
-
-    assert(nframes <= m_bufferSize);
-
-    static float **tmpbuf = 0;
-    static int tmpbufch = 0;
-    static int tmpbufsz = 0;
-
-    int sourceChannels = m_source->getSourceChannelCount();
-
-    // Because we offer pan, we always want at least 2 channels
-    if (sourceChannels < 2) sourceChannels = 2;
-
-    if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < m_bufferSize) {
-
-	if (tmpbuf) {
-	    for (int i = 0; i < tmpbufch; ++i) {
-		delete[] tmpbuf[i];
-	    }
-	    delete[] tmpbuf;
-	}
-
-	tmpbufch = sourceChannels;
-	tmpbufsz = m_bufferSize;
-	tmpbuf = new float *[tmpbufch];
-
-	for (int i = 0; i < tmpbufch; ++i) {
-	    tmpbuf[i] = new float[tmpbufsz];
-	}
-    }
-	
-    sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf);
-
-    float peakLeft = 0.0, peakRight = 0.0;
-
-    for (int ch = 0; ch < 2; ++ch) {
-	
-	float peak = 0.0;
-
-	if (ch < sourceChannels) {
-
-	    // PortAudio samples are interleaved
-	    for (int i = 0; i < nframes; ++i) {
-                if (i < received) {
-                    output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain;
-                    float sample = fabsf(output[i * 2 + ch]);
-                    if (sample > peak) peak = sample;
-                } else {
-                    output[i * 2 + ch] = 0;
-                }
-	    }
-
-	} else if (ch == 1 && sourceChannels == 1) {
-
-	    for (int i = 0; i < nframes; ++i) {
-                if (i < received) {
-                    output[i * 2 + ch] = tmpbuf[0][i] * m_outputGain;
-                    float sample = fabsf(output[i * 2 + ch]);
-                    if (sample > peak) peak = sample;
-                } else {
-                    output[i * 2 + ch] = 0;
-                }
-	    }
-
-	} else {
-	    for (int i = 0; i < nframes; ++i) {
-		output[i * 2 + ch] = 0;
-	    }
-	}
-
-	if (ch == 0) peakLeft = peak;
-	if (ch > 0 || sourceChannels == 1) peakRight = peak;
-    }
-
-    m_source->setOutputLevels(peakLeft, peakRight);
-
-    if (Pa_GetStreamCpuLoad(m_stream) > 0.7) {
-        if (m_source) m_source->audioProcessingOverload();
-    }
-
-    return 0;
-}
-
-#endif /* HAVE_PORTAUDIO */
-
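Note on the removed PortAudio target: process() above pulls de-interleaved channel buffers from the play source, then writes them interleaved (L, R, L, R, ...) into PortAudio's float output buffer while applying the output gain and tracking per-channel peaks for the level meters. The sketch below isolates that interleave/gain/metering step with plain arrays standing in for the source and PortAudio buffers; sizes and sample values are illustrative only.

#include <cmath>
#include <cstdio>

int main()
{
    const int nframes = 4;
    const float gain = 0.5f;

    // De-interleaved source buffers, one per channel (the role played by
    // tmpbuf in the removed process() implementation).
    float left[nframes]  = { 0.1f, -0.8f, 0.3f,  0.0f };
    float right[nframes] = { 0.2f,  0.4f, -0.9f, 0.1f };
    float *tmpbuf[2] = { left, right };

    // Interleaved output, as PortAudio expects for a 2-channel float stream.
    float output[nframes * 2];

    float peak[2] = { 0.0f, 0.0f };

    for (int ch = 0; ch < 2; ++ch) {
        for (int i = 0; i < nframes; ++i) {
            float v = tmpbuf[ch][i] * gain;
            output[i * 2 + ch] = v;
            float a = std::fabs(v);
            if (a > peak[ch]) peak[ch] = a;   // per-channel peak for the meters
        }
    }

    std::printf("peak L = %.2f, peak R = %.2f\n", peak[0], peak[1]);
    return 0;
}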
--- a/audioio/AudioPortAudioTarget.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_PORT_AUDIO_TARGET_H_
-#define _AUDIO_PORT_AUDIO_TARGET_H_
-
-#ifdef HAVE_PORTAUDIO_2_0
-
-// This code requires PortAudio v19 -- it won't work with v18.
-
-#include <portaudio.h>
-
-#include <QObject>
-
-#include "AudioCallbackPlayTarget.h"
-
-#include "base/BaseTypes.h"
-
-class AudioCallbackPlaySource;
-
-class AudioPortAudioTarget : public AudioCallbackPlayTarget
-{
-    Q_OBJECT
-
-public:
-    AudioPortAudioTarget(AudioCallbackPlaySource *source);
-    virtual ~AudioPortAudioTarget();
-
-    virtual void shutdown();
-
-    virtual bool isOK() const;
-
-    virtual double getCurrentTime() const;
-
-public slots:
-    virtual void sourceModelReplaced();
-
-protected:
-    int process(const void *input, void *output, sv_frame_t frames,
-                const PaStreamCallbackTimeInfo *timeInfo,
-                PaStreamCallbackFlags statusFlags);
-
-    static int processStatic(const void *, void *, unsigned long,
-                             const PaStreamCallbackTimeInfo *,
-                             PaStreamCallbackFlags, void *);
-
-    PaStream *m_stream;
-
-    int m_bufferSize;
-    int m_sampleRate;
-    int m_latency;
-    bool m_prioritySet;
-    bool m_done;
-};
-
-#endif /* HAVE_PORTAUDIO */
-
-#endif
-
--- a/audioio/AudioPulseAudioTarget.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,416 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2008 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifdef HAVE_LIBPULSE
-
-#include "AudioPulseAudioTarget.h"
-#include "AudioCallbackPlaySource.h"
-
-#include <QMutexLocker>
-
-#include <iostream>
-#include <cassert>
-#include <cmath>
-
-#define DEBUG_AUDIO_PULSE_AUDIO_TARGET 1
-//#define DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY 1
-
-AudioPulseAudioTarget::AudioPulseAudioTarget(AudioCallbackPlaySource *source) :
-    AudioCallbackPlayTarget(source),
-    m_mutex(QMutex::Recursive),
-    m_loop(0),
-    m_api(0),
-    m_context(0),
-    m_stream(0),
-    m_loopThread(0),
-    m_bufferSize(0),
-    m_sampleRate(0),
-    m_latency(0),
-    m_done(false)
-{
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    cerr << "AudioPulseAudioTarget: Initialising for PulseAudio" << endl;
-#endif
-
-    m_loop = pa_mainloop_new();
-    if (!m_loop) {
-        cerr << "ERROR: AudioPulseAudioTarget: Failed to create main loop" << endl;
-        return;
-    }
-
-    m_api = pa_mainloop_get_api(m_loop);
-
-    //!!! handle signals how?
-
-    m_bufferSize = 20480;
-    m_sampleRate = 44100;
-    if (m_source && (m_source->getSourceSampleRate() != 0)) {
-	m_sampleRate = int(m_source->getSourceSampleRate());
-    }
-    m_spec.rate = m_sampleRate;
-    m_spec.channels = 2;
-    m_spec.format = PA_SAMPLE_FLOAT32NE;
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    cerr << "AudioPulseAudioTarget: Creating context" << endl;
-#endif
-
-    m_context = pa_context_new(m_api, source->getClientName().toLocal8Bit().data());
-    if (!m_context) {
-        cerr << "ERROR: AudioPulseAudioTarget: Failed to create context object" << endl;
-        return;
-    }
-
-    pa_context_set_state_callback(m_context, contextStateChangedStatic, this);
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    cerr << "AudioPulseAudioTarget: Connecting to default server..." << endl;
-#endif
-
-    pa_context_connect(m_context, 0, // default server
-                       (pa_context_flags_t)PA_CONTEXT_NOAUTOSPAWN, 0);
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    cerr << "AudioPulseAudioTarget: Starting main loop" << endl;
-#endif
-
-    m_loopThread = new MainLoopThread(m_loop);
-    m_loopThread->start();
-
-#ifdef DEBUG_PULSE_AUDIO_TARGET
-    cerr << "AudioPulseAudioTarget: initialised OK" << endl;
-#endif
-}
-
-AudioPulseAudioTarget::~AudioPulseAudioTarget()
-{
-    SVDEBUG << "AudioPulseAudioTarget::~AudioPulseAudioTarget()" << endl;
-
-    if (m_source) {
-        m_source->setTarget(0, m_bufferSize);
-    }
-
-    shutdown();
-
-    QMutexLocker locker(&m_mutex);
-
-    if (m_stream) pa_stream_unref(m_stream);
-
-    if (m_context) pa_context_unref(m_context);
-
-    if (m_loop) {
-        pa_signal_done();
-        pa_mainloop_free(m_loop);
-    }
-
-    m_stream = 0;
-    m_context = 0;
-    m_loop = 0;
-
-    SVDEBUG << "AudioPulseAudioTarget::~AudioPulseAudioTarget() done" << endl;
-}
-
-void 
-AudioPulseAudioTarget::shutdown()
-{
-    m_done = true;
-}
-
-bool
-AudioPulseAudioTarget::isOK() const
-{
-    return (m_context != 0);
-}
-
-double
-AudioPulseAudioTarget::getCurrentTime() const
-{
-    if (!m_stream) return 0.0;
-    
-    pa_usec_t usec = 0;
-    pa_stream_get_time(m_stream, &usec);
-    return double(usec) / 1000000.0;
-}
-
-void
-AudioPulseAudioTarget::sourceModelReplaced()
-{
-    m_source->setTargetSampleRate(m_sampleRate);
-}
-
-void
-AudioPulseAudioTarget::streamWriteStatic(pa_stream *,
-                                         size_t length,
-                                         void *data)
-{
-    AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
-    
-//    assert(stream == target->m_stream);
-
-    target->streamWrite(length);
-}
-
-void
-AudioPulseAudioTarget::streamWrite(sv_frame_t requested)
-{
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
-    cout << "AudioPulseAudioTarget::streamWrite(" << requested << ")" << endl;
-#endif
-    if (m_done) return;
-
-    QMutexLocker locker(&m_mutex);
-
-    pa_usec_t latency = 0;
-    int negative = 0;
-    if (!pa_stream_get_latency(m_stream, &latency, &negative)) {
-        int latframes = int(double(latency) / 1000000.0 * double(m_sampleRate));
-        if (latframes > 0) m_source->setTargetPlayLatency(latframes);
-    }
-
-    static float *output = 0;
-    static float **tmpbuf = 0;
-    static int tmpbufch = 0;
-    static sv_frame_t tmpbufsz = 0;
-
-    int sourceChannels = m_source->getSourceChannelCount();
-
-    // Because we offer pan, we always want at least 2 channels
-    if (sourceChannels < 2) sourceChannels = 2;
-
-    sv_frame_t nframes = requested / (sourceChannels * sizeof(float));
-
-    if (nframes > m_bufferSize) {
-        cerr << "WARNING: AudioPulseAudioTarget::streamWrite: nframes " << nframes << " > m_bufferSize " << m_bufferSize << endl;
-    }
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
-    cout << "AudioPulseAudioTarget::streamWrite: nframes = " << nframes << endl;
-#endif
-
-    if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < nframes) {
-
-	if (tmpbuf) {
-	    for (int i = 0; i < tmpbufch; ++i) {
-		delete[] tmpbuf[i];
-	    }
-	    delete[] tmpbuf;
-	}
-
-        if (output) {
-            delete[] output;
-        }
-
-	tmpbufch = sourceChannels;
-	tmpbufsz = nframes;
-	tmpbuf = new float *[tmpbufch];
-
-	for (int i = 0; i < tmpbufch; ++i) {
-	    tmpbuf[i] = new float[tmpbufsz];
-	}
-
-        output = new float[tmpbufsz * tmpbufch];
-    }
-	
-    sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf);
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
-    cerr << "requested " << nframes << ", received " << received << endl;
-
-    if (received < nframes) {
-        cerr << "*** WARNING: Wrong number of frames received" << endl;
-    }
-#endif
-
-    float peakLeft = 0.0, peakRight = 0.0;
-
-    for (int ch = 0; ch < 2; ++ch) {
-	
-	float peak = 0.0;
-
-        // PulseAudio samples are interleaved
-        for (int i = 0; i < nframes; ++i) {
-            if (i < received) {
-                output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain;
-                float sample = fabsf(output[i * 2 + ch]);
-                if (sample > peak) peak = sample;
-            } else {
-                output[i * 2 + ch] = 0;
-            }
-        }
-
-	if (ch == 0) peakLeft = peak;
-	if (ch == 1) peakRight = peak;
-    }
-
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
-    SVDEBUG << "calling pa_stream_write with "
-              << nframes * tmpbufch * sizeof(float) << " bytes" << endl;
-#endif
-
-    pa_stream_write(m_stream, output,
-                    size_t(nframes * tmpbufch * sizeof(float)),
-                    0, 0, PA_SEEK_RELATIVE);
-
-    m_source->setOutputLevels(peakLeft, peakRight);
-
-    return;
-}
-
-void
-AudioPulseAudioTarget::streamStateChangedStatic(pa_stream *,
-                                                void *data)
-{
-    AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
-    
-//    assert(stream == target->m_stream);
-
-    target->streamStateChanged();
-}
-
-void
-AudioPulseAudioTarget::streamStateChanged()
-{
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    SVDEBUG << "AudioPulseAudioTarget::streamStateChanged" << endl;
-#endif
-    QMutexLocker locker(&m_mutex);
-
-    switch (pa_stream_get_state(m_stream)) {
-
-    case PA_STREAM_UNCONNECTED:
-    case PA_STREAM_CREATING:
-    case PA_STREAM_TERMINATED:
-        break;
-
-    case PA_STREAM_READY:
-    {
-        SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Ready" << endl;
-        
-        pa_usec_t latency = 0;
-        int negative = 0;
-        if (pa_stream_get_latency(m_stream, &latency, &negative)) {
-            cerr << "AudioPulseAudioTarget::streamStateChanged: Failed to query latency" << endl;
-        }
-        cerr << "Latency = " << latency << " usec" << endl;
-        int latframes = int(double(latency) / 1000000.0 * m_sampleRate);
-        cerr << "that's " << latframes << " frames" << endl;
-
-        const pa_buffer_attr *attr;
-        if (!(attr = pa_stream_get_buffer_attr(m_stream))) {
-            SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Cannot query stream buffer attributes" << endl;
-            m_source->setTarget(this, m_bufferSize);
-            m_source->setTargetSampleRate(m_sampleRate);
-            if (latframes != 0) m_source->setTargetPlayLatency(latframes);
-        } else {
-            int targetLength = attr->tlength;
-            SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: stream target length = " << targetLength << endl;
-            m_source->setTarget(this, targetLength);
-            m_source->setTargetSampleRate(m_sampleRate);
-            if (latframes == 0) latframes = targetLength;
-            cerr << "latency = " << latframes << endl;
-            m_source->setTargetPlayLatency(latframes);
-        }
-    }
-    break;
-    
-    case PA_STREAM_FAILED:
-    default:
-        cerr << "AudioPulseAudioTarget::streamStateChanged: Error: "
-             << pa_strerror(pa_context_errno(m_context)) << endl;
-        //!!! do something...
-        break;
-    }
-}
-
-void
-AudioPulseAudioTarget::contextStateChangedStatic(pa_context *,
-                                                 void *data)
-{
-    AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
-    
-//    assert(context == target->m_context);
-
-    target->contextStateChanged();
-}
-
-void
-AudioPulseAudioTarget::contextStateChanged()
-{
-#ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET
-    SVDEBUG << "AudioPulseAudioTarget::contextStateChanged" << endl;
-#endif
-    QMutexLocker locker(&m_mutex);
-
-    switch (pa_context_get_state(m_context)) {
-
-        case PA_CONTEXT_UNCONNECTED:
-        case PA_CONTEXT_CONNECTING:
-        case PA_CONTEXT_AUTHORIZING:
-        case PA_CONTEXT_SETTING_NAME:
-            break;
-
-        case PA_CONTEXT_READY:
-            SVDEBUG << "AudioPulseAudioTarget::contextStateChanged: Ready"
-                      << endl;
-
-            m_stream = pa_stream_new(m_context, "stream", &m_spec, 0);
-            assert(m_stream); //!!!
-            
-            pa_stream_set_state_callback(m_stream, streamStateChangedStatic, this);
-            pa_stream_set_write_callback(m_stream, streamWriteStatic, this);
-            pa_stream_set_overflow_callback(m_stream, streamOverflowStatic, this);
-            pa_stream_set_underflow_callback(m_stream, streamUnderflowStatic, this);
-            if (pa_stream_connect_playback
-                (m_stream, 0, 0,
-                 pa_stream_flags_t(PA_STREAM_INTERPOLATE_TIMING |
-                                   PA_STREAM_AUTO_TIMING_UPDATE),
-                 0, 0)) { //??? return value
-                cerr << "AudioPulseAudioTarget: Failed to connect playback stream" << endl;
-            }
-
-            break;
-
-        case PA_CONTEXT_TERMINATED:
-            SVDEBUG << "AudioPulseAudioTarget::contextStateChanged: Terminated" << endl;
-            //!!! do something...
-            break;
-
-        case PA_CONTEXT_FAILED:
-        default:
-            cerr << "AudioPulseAudioTarget::contextStateChanged: Error: "
-                      << pa_strerror(pa_context_errno(m_context)) << endl;
-            //!!! do something...
-            break;
-    }
-}
-
-void
-AudioPulseAudioTarget::streamOverflowStatic(pa_stream *, void *)
-{
-    SVDEBUG << "AudioPulseAudioTarget::streamOverflowStatic: Overflow!" << endl;
-}
-
-void
-AudioPulseAudioTarget::streamUnderflowStatic(pa_stream *, void *data)
-{
-    SVDEBUG << "AudioPulseAudioTarget::streamUnderflowStatic: Underflow!" << endl;
-    AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
-    if (target && target->m_source) {
-        target->m_source->audioProcessingOverload();
-    }
-}
-
-#endif /* HAVE_PULSEAUDIO */
-
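Note on the removed PulseAudio target: streamWrite() has to translate between PulseAudio's units and the play source's. The write callback reports a request size in bytes, converted to frames by dividing by channels * sizeof(float), and pa_stream_get_latency() reports microseconds, converted to frames via the sample rate. A small arithmetic sketch of those two conversions, with illustrative numbers:

#include <cstddef>
#include <iostream>

int main()
{
    const int channels = 2;                 // stereo float32 stream
    const int sampleRate = 44100;

    std::size_t requestedBytes = 65536;     // byte count passed to the write callback
    long long nframes =
        (long long)(requestedBytes / (channels * sizeof(float)));

    unsigned long long latencyUsec = 25000; // as reported by pa_stream_get_latency()
    long long latFrames =
        (long long)(double(latencyUsec) / 1000000.0 * double(sampleRate));

    std::cout << requestedBytes << " bytes -> " << nframes << " frames; "
              << latencyUsec << " us latency -> " << latFrames
              << " frames" << std::endl;
    return 0;
}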
--- a/audioio/AudioPulseAudioTarget.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2008 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_PULSE_AUDIO_TARGET_H_
-#define _AUDIO_PULSE_AUDIO_TARGET_H_
-
-#ifdef HAVE_LIBPULSE
-
-#include <pulse/pulseaudio.h>
-
-#include <QObject>
-#include <QMutex>
-#include "base/Thread.h"
-
-#include "AudioCallbackPlayTarget.h"
-
-class AudioCallbackPlaySource;
-
-class AudioPulseAudioTarget : public AudioCallbackPlayTarget
-{
-    Q_OBJECT
-
-public:
-    AudioPulseAudioTarget(AudioCallbackPlaySource *source);
-    virtual ~AudioPulseAudioTarget();
-
-    virtual void shutdown();
-
-    virtual bool isOK() const;
-
-    virtual double getCurrentTime() const;
-
-public slots:
-    virtual void sourceModelReplaced();
-
-protected:
-    void streamWrite(sv_frame_t);
-    void streamStateChanged();
-    void contextStateChanged();
-
-    static void streamWriteStatic(pa_stream *, size_t, void *);
-    static void streamStateChangedStatic(pa_stream *, void *);
-    static void streamOverflowStatic(pa_stream *, void *);
-    static void streamUnderflowStatic(pa_stream *, void *);
-    static void contextStateChangedStatic(pa_context *, void *);
-
-    QMutex m_mutex;
-
-    class MainLoopThread : public Thread
-    {
-    public:
-        MainLoopThread(pa_mainloop *loop) : Thread(NonRTThread), m_loop(loop) { } //!!! or RTThread
-        virtual void run() {
-            int rv = 0;
-            pa_mainloop_run(m_loop, &rv); //!!! check return value from this, and rv
-        }
-
-    private:
-        pa_mainloop *m_loop;
-    };
-
-    pa_mainloop *m_loop;
-    pa_mainloop_api *m_api;
-    pa_context *m_context;
-    pa_stream *m_stream;
-    pa_sample_spec m_spec;
-
-    MainLoopThread *m_loopThread;
-
-    int m_bufferSize;
-    int m_sampleRate;
-    int m_latency;
-    bool m_done;
-};
-
-#endif /* HAVE_PULSEAUDIO */
-
-#endif
-
--- a/audioio/AudioTargetFactory.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "AudioTargetFactory.h"
-
-#include "AudioJACKTarget.h"
-#include "AudioPortAudioTarget.h"
-#include "AudioPulseAudioTarget.h"
-
-#include "AudioCallbackPlayTarget.h"
-
-#include <QCoreApplication>
-
-#include <iostream>
-
-AudioTargetFactory *
-AudioTargetFactory::m_instance = 0;
-
-AudioTargetFactory *
-AudioTargetFactory::getInstance()
-{
-    if (!m_instance) m_instance = new AudioTargetFactory();
-    return m_instance;
-}
-
-AudioTargetFactory::AudioTargetFactory()
-{
-}
-
-std::vector<QString>
-AudioTargetFactory::getCallbackTargetNames(bool includeAuto) const
-{
-    std::vector<QString> names;
-    if (includeAuto) names.push_back("auto");
-
-#ifdef HAVE_JACK
-    names.push_back("jack");
-#endif
-
-#ifdef HAVE_LIBPULSE
-    names.push_back("pulse");
-#endif
-
-#ifdef HAVE_PORTAUDIO_2_0
-    names.push_back("port");
-#endif
-
-    return names;
-}
-
-QString
-AudioTargetFactory::getCallbackTargetDescription(QString name) const
-{
-    if (name == "auto") {
-        return QCoreApplication::translate("AudioTargetFactory",
-                                           "(auto)");
-    }
-    if (name == "jack") {
-        return QCoreApplication::translate("AudioTargetFactory",
-                                           "JACK Audio Connection Kit");
-    }
-    if (name == "pulse") {
-        return QCoreApplication::translate("AudioTargetFactory",
-                                           "PulseAudio Server");
-    }
-    if (name == "port") {
-        return QCoreApplication::translate("AudioTargetFactory",
-                                           "Default Soundcard Device");
-    }
-
-    return "(unknown)";
-}
-
-QString
-AudioTargetFactory::getDefaultCallbackTarget() const
-{
-    if (m_default == "") return "auto";
-    return m_default;
-}
-
-bool
-AudioTargetFactory::isAutoCallbackTarget(QString name) const
-{
-    return (name == "auto" || name == "");
-}
-
-void
-AudioTargetFactory::setDefaultCallbackTarget(QString target)
-{
-    m_default = target;
-}
-
-AudioCallbackPlayTarget *
-AudioTargetFactory::createCallbackTarget(AudioCallbackPlaySource *source)
-{
-    AudioCallbackPlayTarget *target = 0;
-
-    if (m_default != "" && m_default != "auto") {
-
-#ifdef HAVE_JACK
-        if (m_default == "jack") target = new AudioJACKTarget(source);
-#endif
-
-#ifdef HAVE_LIBPULSE
-        if (m_default == "pulse") target = new AudioPulseAudioTarget(source);
-#endif
-
-#ifdef HAVE_PORTAUDIO_2_0
-        if (m_default == "port") target = new AudioPortAudioTarget(source);
-#endif
-
-        if (!target || !target->isOK()) {
-            cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open the requested target (\"" << m_default << "\")" << endl;
-            delete target;
-            return 0;
-        } else {
-            return target;
-        }
-    }
-
-#ifdef HAVE_JACK
-    target = new AudioJACKTarget(source);
-    if (target->isOK()) return target;
-    else {
-	cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open JACK target" << endl;
-	delete target;
-    }
-#endif
-    
-#ifdef HAVE_LIBPULSE
-    target = new AudioPulseAudioTarget(source);
-    if (target->isOK()) return target;
-    else {
-	cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open PulseAudio target" << endl;
-	delete target;
-    }
-#endif
-    
-#ifdef HAVE_PORTAUDIO_2_0
-    target = new AudioPortAudioTarget(source);
-    if (target->isOK()) return target;
-    else {
-	cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open PortAudio target" << endl;
-	delete target;
-    }
-#endif
-
-    cerr << "WARNING: AudioTargetFactory::createCallbackTarget: No suitable targets available" << endl;
-    return 0;
-}
-
-
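Note on the removed factory: when no explicit driver is configured, createCallbackTarget() tries the available back-ends in a fixed priority order (JACK, then PulseAudio, then PortAudio), keeping the first whose isOK() returns true and discarding the rest. A minimal sketch of that fallback pattern, using a stand-in Target type rather than the project's classes:

#include <iostream>
#include <string>
#include <vector>

// Stand-in for an audio target: just a name and whether it opened.
struct Target {
    std::string name;
    bool ok;
    bool isOK() const { return ok; }
};

int main()
{
    // Same priority order as the removed factory; the "ok" flags simulate
    // which drivers happened to come up on this machine.
    std::vector<Target> candidates = {
        { "jack",  false },
        { "pulse", true  },
        { "port",  true  },
    };

    const Target *chosen = nullptr;
    for (const Target &t : candidates) {
        if (t.isOK()) { chosen = &t; break; }
        std::cerr << "WARNING: failed to open " << t.name << " target" << std::endl;
    }

    if (chosen) std::cout << "using " << chosen->name << " target" << std::endl;
    else        std::cout << "no suitable targets available" << std::endl;
    return 0;
}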
--- a/audioio/AudioTargetFactory.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_TARGET_FACTORY_H_
-#define _AUDIO_TARGET_FACTORY_H_
-
-#include <vector>
-#include <QString>
-
-#include "base/Debug.h"
-
-class AudioCallbackPlaySource;
-class AudioCallbackPlayTarget;
-
-class AudioTargetFactory 
-{
-public:
-    static AudioTargetFactory *getInstance();
-
-    std::vector<QString> getCallbackTargetNames(bool includeAuto = true) const;
-    QString getCallbackTargetDescription(QString name) const;
-    QString getDefaultCallbackTarget() const;
-    bool isAutoCallbackTarget(QString name) const;
-    void setDefaultCallbackTarget(QString name);
-
-    AudioCallbackPlayTarget *createCallbackTarget(AudioCallbackPlaySource *);
-
-protected:
-    AudioTargetFactory();
-    static AudioTargetFactory *m_instance;
-    QString m_default;
-};
-
-#endif
-
--- a/audioio/ClipMixer.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,248 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "ClipMixer.h"
-
-#include <sndfile.h>
-#include <cmath>
-
-#include "base/Debug.h"
-
-//#define DEBUG_CLIP_MIXER 1
-
-ClipMixer::ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize) :
-    m_channels(channels),
-    m_sampleRate(sampleRate),
-    m_blockSize(blockSize),
-    m_clipData(0),
-    m_clipLength(0),
-    m_clipF0(0),
-    m_clipRate(0)
-{
-}
-
-ClipMixer::~ClipMixer()
-{
-    if (m_clipData) free(m_clipData);
-}
-
-void
-ClipMixer::setChannelCount(int channels)
-{
-    m_channels = channels;
-}
-
-bool
-ClipMixer::loadClipData(QString path, double f0, double level)
-{
-    if (m_clipData) {
-        cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl;
-        return false;
-    }
-
-    SF_INFO info;
-    SNDFILE *file;
-    float *tmpFrames;
-    sv_frame_t i;
-
-    info.format = 0;
-    file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info);
-    if (!file) {
-	cerr << "ClipMixer::loadClipData: Failed to open file path \""
-             << path << "\": " << sf_strerror(file) << endl;
-	return false;
-    }
-
-    tmpFrames = (float *)malloc(info.frames * info.channels * sizeof(float));
-    if (!tmpFrames) {
-        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * info.channels * sizeof(float) << ") failed" << endl;
-        return false;
-    }
-
-    sf_readf_float(file, tmpFrames, info.frames);
-    sf_close(file);
-
-    m_clipData = (float *)malloc(info.frames * sizeof(float));
-    if (!m_clipData) {
-        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * sizeof(float) << ") failed" << endl;
-	free(tmpFrames);
-	return false;
-    }
-
-    for (i = 0; i < info.frames; ++i) {
-	int j;
-	m_clipData[i] = 0.0f;
-	for (j = 0; j < info.channels; ++j) {
-	    m_clipData[i] += tmpFrames[i * info.channels + j] * float(level);
-	}
-    }
-
-    free(tmpFrames);
-
-    m_clipLength = info.frames;
-    m_clipF0 = f0;
-    m_clipRate = info.samplerate;
-
-    return true;
-}
-
-void
-ClipMixer::reset()
-{
-    m_playing.clear();
-}
-
-double
-ClipMixer::getResampleRatioFor(double frequency)
-{
-    if (!m_clipData || !m_clipRate) return 1.0;
-    double pitchRatio = m_clipF0 / frequency;
-    double resampleRatio = m_sampleRate / m_clipRate;
-    return pitchRatio * resampleRatio;
-}
-
-sv_frame_t
-ClipMixer::getResampledClipDuration(double frequency)
-{
-    return sv_frame_t(ceil(double(m_clipLength) * getResampleRatioFor(frequency)));
-}
-
-void
-ClipMixer::mix(float **toBuffers, 
-               float gain,
-               std::vector<NoteStart> newNotes, 
-               std::vector<NoteEnd> endingNotes)
-{
-    foreach (NoteStart note, newNotes) {
-        if (note.frequency > 20 && 
-            note.frequency < 5000) {
-            m_playing.push_back(note);
-        }
-    }
-
-    std::vector<NoteStart> remaining;
-
-    float *levels = new float[m_channels];
-
-#ifdef DEBUG_CLIP_MIXER
-    cerr << "ClipMixer::mix: have " << m_playing.size() << " playing note(s)"
-         << " and " << endingNotes.size() << " note(s) ending here"
-         << endl;
-#endif
-
-    foreach (NoteStart note, m_playing) {
-
-        for (int c = 0; c < m_channels; ++c) {
-            levels[c] = note.level * gain;
-        }
-        if (note.pan != 0.0 && m_channels == 2) {
-            levels[0] *= 1.0f - note.pan;
-            levels[1] *= note.pan + 1.0f;
-        }
-
-        sv_frame_t start = note.frameOffset;
-        sv_frame_t durationHere = m_blockSize;
-        if (start > 0) durationHere = m_blockSize - start;
-
-        bool ending = false;
-
-        foreach (NoteEnd end, endingNotes) {
-            if (end.frequency == note.frequency && 
-                end.frameOffset >= start &&
-                end.frameOffset <= m_blockSize) {
-                ending = true;
-                durationHere = end.frameOffset;
-                if (start > 0) durationHere = end.frameOffset - start;
-                break;
-            }
-        }
-
-        sv_frame_t clipDuration = getResampledClipDuration(note.frequency);
-        if (start + clipDuration > 0) {
-            if (start < 0 && start + clipDuration < durationHere) {
-                durationHere = start + clipDuration;
-            }
-            if (durationHere > 0) {
-                mixNote(toBuffers,
-                        levels,
-                        note.frequency,
-                        start < 0 ? -start : 0,
-                        start > 0 ?  start : 0,
-                        durationHere,
-                        ending);
-            }
-        }
-
-        if (!ending) {
-            NoteStart adjusted = note;
-            adjusted.frameOffset -= m_blockSize;
-            remaining.push_back(adjusted);
-        }
-    }
-
-    delete[] levels;
-
-    m_playing = remaining;
-}
-
-void
-ClipMixer::mixNote(float **toBuffers,
-                   float *levels,
-                   float frequency,
-                   sv_frame_t sourceOffset,
-                   sv_frame_t targetOffset,
-                   sv_frame_t sampleCount,
-                   bool isEnd)
-{
-    if (!m_clipData) return;
-
-    double ratio = getResampleRatioFor(frequency);
-    
-    double releaseTime = 0.01;
-    sv_frame_t releaseSampleCount = sv_frame_t(round(releaseTime * m_sampleRate));
-    if (releaseSampleCount > sampleCount) {
-        releaseSampleCount = sampleCount;
-    }
-    double releaseFraction = 1.0/double(releaseSampleCount);
-
-    for (sv_frame_t i = 0; i < sampleCount; ++i) {
-
-        sv_frame_t s = sourceOffset + i;
-
-        double os = double(s) / ratio;
-        sv_frame_t osi = sv_frame_t(floor(os));
-
-        //!!! just linear interpolation for now (same as SV's sample
-        //!!! player). a small sinc kernel would be better and
-        //!!! probably "good enough"
-        double value = 0.0;
-        if (osi < m_clipLength) {
-            value += m_clipData[osi];
-        }
-        if (osi + 1 < m_clipLength) {
-            value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - double(osi));
-        }
-         
-        if (isEnd && i + releaseSampleCount > sampleCount) {
-            value *= releaseFraction * double(sampleCount - i); // linear ramp for release
-        }
-
-        for (int c = 0; c < m_channels; ++c) {
-            toBuffers[c][targetOffset + i] += float(levels[c] * value);
-        }
-    }
-}
-
-
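Note on the removed ClipMixer: it pitches its single loaded clip by resampling. The read ratio is (clipF0 / noteFrequency) * (outputRate / clipRate), and samples are read with plain linear interpolation between the two nearest clip frames, as the //!!! comment above acknowledges. The sketch below reproduces that ratio and one interpolated read with made-up values:

#include <cmath>
#include <cstdio>

int main()
{
    const double clipF0 = 440.0;        // pitch the clip was recorded at
    const double clipRate = 48000.0;    // clip's own sample rate
    const double outRate = 44100.0;     // playback sample rate
    const double noteFreq = 220.0;      // requested note frequency

    // Same ratio as getResampleRatioFor(): > 1 stretches the clip (lower
    // pitch), < 1 compresses it (higher pitch).
    double ratio = (clipF0 / noteFreq) * (outRate / clipRate);

    const int clipLength = 8;
    float clip[clipLength] = { 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, -0.5f, -1.0f, -0.5f };

    // Read output sample s by mapping it back into the clip and interpolating
    // linearly between the two nearest clip samples, as mixNote() did.
    int s = 5;
    double os = double(s) / ratio;
    int osi = int(std::floor(os));

    double value = 0.0;
    if (osi < clipLength) value += clip[osi];
    if (osi + 1 < clipLength)
        value += (clip[osi + 1] - clip[osi]) * (os - double(osi));

    std::printf("ratio = %.4f, source pos %.3f -> value %.3f\n", ratio, os, value);
    return 0;
}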
--- a/audioio/ClipMixer.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,94 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef CLIP_MIXER_H
-#define CLIP_MIXER_H
-
-#include <QString>
-#include <vector>
-
-#include "base/BaseTypes.h"
-
-/**
- * Mix in synthetic notes produced by resampling a prerecorded
- * clip. (i.e. this is an implementation of a digital sampler in the
- * musician's sense.) This can mix any number of notes of arbitrary
- * frequency, so long as they all use the same sample clip.
- */
-
-class ClipMixer
-{
-public:
-    ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize);
-    ~ClipMixer();
-
-    void setChannelCount(int channels);
-
-    /**
-     * Load a sample clip from a wav file. This can only happen once:
-     * construct a new ClipMixer if you want a different clip. The
-     * clip was recorded at a pitch with fundamental frequency clipF0,
-     * and should be scaled by level (in the range 0-1) when playing
-     * back.
-     */
-    bool loadClipData(QString clipFilePath, double clipF0, double level);
-
-    void reset(); // discarding any playing notes
-
-    struct NoteStart {
-	sv_frame_t frameOffset; // within current processing block
-	float frequency; // Hz
-	float level; // volume in range (0,1]
-	float pan; // range [-1,1]
-    };
-
-    struct NoteEnd {
-	sv_frame_t frameOffset; // in current processing block
-        float frequency; // matching note start
-    };
-
-    void mix(float **toBuffers, 
-             float gain,
-	     std::vector<NoteStart> newNotes, 
-	     std::vector<NoteEnd> endingNotes);
-
-private:
-    int m_channels;
-    sv_samplerate_t m_sampleRate;
-    sv_frame_t m_blockSize;
-
-    QString m_clipPath;
-
-    float *m_clipData;
-    sv_frame_t m_clipLength;
-    double m_clipF0;
-    sv_samplerate_t m_clipRate;
-
-    std::vector<NoteStart> m_playing;
-
-    double getResampleRatioFor(double frequency);
-    sv_frame_t getResampledClipDuration(double frequency);
-
-    void mixNote(float **toBuffers, 
-                 float *levels,
-                 float frequency,
-                 sv_frame_t sourceOffset, // within resampled note
-                 sv_frame_t targetOffset, // within target buffer
-                 sv_frame_t sampleCount,
-                 bool isEnd);
-};
-
-
-#endif
--- a/audioio/ContinuousSynth.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "ContinuousSynth.h"
-
-#include "base/Debug.h"
-#include "system/System.h"
-
-#include <cmath>
-
-ContinuousSynth::ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType) :
-    m_channels(channels),
-    m_sampleRate(sampleRate),
-    m_blockSize(blockSize),
-    m_prevF0(-1.0),
-    m_phase(0.0),
-    m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square
-{
-}
-
-ContinuousSynth::~ContinuousSynth()
-{
-}
-
-void
-ContinuousSynth::reset()
-{
-    m_phase = 0;
-}
-
-void
-ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0f)
-{
-    double f0(f0f);
-    if (f0 == 0.0) f0 = m_prevF0;
-
-    bool wasOn = (m_prevF0 > 0.0);
-    bool nowOn = (f0 > 0.0);
-
-    if (!nowOn && !wasOn) {
-        m_phase = 0;
-        return;
-    }
-
-    sv_frame_t fadeLength = 100;
-
-    float *levels = new float[m_channels];
-    
-    for (int c = 0; c < m_channels; ++c) {
-        levels[c] = gain * 0.5f; // scale gain otherwise too loud compared to source
-    }
-    if (pan != 0.0 && m_channels == 2) {
-        levels[0] *= 1.0f - pan;
-        levels[1] *= pan + 1.0f;
-    }
-
-//    cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl;
-
-    for (sv_frame_t i = 0; i < m_blockSize; ++i) {
-
-        double fHere = (nowOn ? f0 : m_prevF0);
-
-        if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) {
-            // interpolate the frequency shift
-            fHere = m_prevF0 + ((f0 - m_prevF0) * double(i)) / double(fadeLength);
-        }
-
-        double phasor = (fHere * 2 * M_PI) / m_sampleRate;
-    
-        m_phase = m_phase + phasor;
-
-        int harmonics = int((m_sampleRate / 4) / fHere - 1);
-        if (harmonics < 1) harmonics = 1;
-
-        switch (m_wavetype) {
-        case 1:
-            harmonics = 1;
-            break;
-        case 2:
-            break;
-        case 3:
-            break;
-        default:
-            harmonics = 3;
-            break;
-        }
-
-        for (int h = 0; h < harmonics; ++h) {
-
-            double v = 0;
-            double hn = 0;
-            double hp = 0;
-
-            switch (m_wavetype) {
-            case 1: // single sinusoid
-                v = sin(m_phase);
-                break;
-            case 2: // sawtooth
-                if (h != 0) {
-                    hn = h + 1;
-                    hp = m_phase * hn;
-                    v = -(1.0 / M_PI) * sin(hp) / hn;
-                } else {
-                    v = 0.5;
-                }
-                break;
-            case 3: // square
-                hn = h*2 + 1;
-                hp = m_phase * hn;
-                v = sin(hp) / hn;
-                break;
-            default: // 3 sinusoids
-                hn = h + 1;
-                hp = m_phase * hn;
-                v = sin(hp) / hn;
-                break;
-            }
-
-            if (!wasOn && i < fadeLength) {
-                // fade in
-                v = v * (double(i) / double(fadeLength));
-            } else if (!nowOn) {
-                // fade out
-                if (i > fadeLength) v = 0;
-                else v = v * (1.0 - (double(i) / double(fadeLength)));
-            }
-
-            for (int c = 0; c < m_channels; ++c) {
-                toBuffers[c][i] += float(levels[c] * v);
-            }
-        }
-    }    
-
-    m_prevF0 = f0;
-
-    delete[] levels;
-}
-
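Note on the removed ContinuousSynth: it builds its output additively; for the square-wave type it sums odd harmonics sin((2h+1)*phase)/(2h+1), limiting the harmonic count so partials stay well below the sample rate. A standalone sketch of that summation for one block, with illustrative parameters:

#include <cmath>
#include <cstdio>

int main()
{
    const double pi = 3.14159265358979323846;
    const double sampleRate = 44100.0;
    const double f0 = 440.0;
    const int blockSize = 8;

    // Cap the number of partials, as the removed code does by deriving the
    // harmonic count from sampleRate / 4.
    int harmonics = int((sampleRate / 4) / f0 - 1);
    if (harmonics < 1) harmonics = 1;

    double phase = 0.0;
    double phasor = (f0 * 2.0 * pi) / sampleRate;

    for (int i = 0; i < blockSize; ++i) {
        phase += phasor;
        double v = 0.0;
        for (int h = 0; h < harmonics; ++h) {
            double hn = h * 2 + 1;            // odd harmonics only, for a square
            v += std::sin(phase * hn) / hn;
        }
        std::printf("sample %d = %.4f\n", i, v);
    }
    return 0;
}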
--- a/audioio/ContinuousSynth.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef CONTINUOUS_SYNTH_H
-#define CONTINUOUS_SYNTH_H
-
-#include "base/BaseTypes.h"
-
-/**
- * Mix into a target buffer a signal synthesised so as to sound at a
- * specific frequency. The frequency may change with each processing
- * block, or may be switched on or off.
- */
-
-class ContinuousSynth
-{
-public:
-    ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType);
-    ~ContinuousSynth();
-    
-    void setChannelCount(int channels);
-
-    void reset();
-
-    /**
-     * Mix in a signal to be heard at the given fundamental
-     * frequency. Any oscillator state will be maintained between
-     * process calls so as to provide a continuous sound. The f0 value
-     * may vary between calls.
-     *
-     * Supply f0 equal to 0 if you want to maintain the f0 from the
-     * previous block (without having to remember what it was).
-     *
-     * Supply f0 less than 0 for silence. You should continue to call
-     * this even when the signal is silent if you want to ensure the
-     * sound switches on and off cleanly.
-     */
-    void mix(float **toBuffers,
-             float gain,
-             float pan,
-             float f0);
-
-private:
-    int m_channels;
-    sv_samplerate_t m_sampleRate;
-    sv_frame_t m_blockSize;
-
-    double m_prevF0;
-    double m_phase;
-
-    int m_wavetype;
-};
-
-#endif
--- a/audioio/PlaySpeedRangeMapper.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#include "PlaySpeedRangeMapper.h"
-
-#include <iostream>
-#include <cmath>
-
-// PlaySpeedRangeMapper maps a position in the range [0,120] on to a
-// play speed factor on a logarithmic scale in the range 0.125 ->
-// 8. This ensures that the desirable speed factors 0.25, 0.5, 1, 2,
-// and 4 are all mapped to exact positions (respectively 20, 40, 60,
-// 80, 100).
-
-// Note that the "factor" referred to below is a play speed factor
-// (higher = faster, 1.0 = normal speed), the "value" is a percentage
-// (higher = faster, 100 = normal speed), and the "position" is an
-// integer step on the dial's scale (0-120, 60 = centre).
-
-PlaySpeedRangeMapper::PlaySpeedRangeMapper() :
-    m_minpos(0),
-    m_maxpos(120)
-{
-}
-
-int
-PlaySpeedRangeMapper::getPositionForValue(double value) const
-{
-    // value is percent
-    double factor = getFactorForValue(value);
-    int position = getPositionForFactor(factor);
-    return position;
-}
-
-int
-PlaySpeedRangeMapper::getPositionForValueUnclamped(double value) const
-{
-    // We don't really provide this
-    return getPositionForValue(value);
-}
-
-double
-PlaySpeedRangeMapper::getValueForPosition(int position) const
-{
-    double factor = getFactorForPosition(position);
-    double pc = getValueForFactor(factor);
-    return pc;
-}
-
-double
-PlaySpeedRangeMapper::getValueForPositionUnclamped(int position) const
-{
-    // We don't really provide this
-    return getValueForPosition(position);
-}
-
-double
-PlaySpeedRangeMapper::getValueForFactor(double factor) const
-{
-    return factor * 100.0;
-}
-
-double
-PlaySpeedRangeMapper::getFactorForValue(double value) const
-{
-    return value / 100.0;
-}
-
-int
-PlaySpeedRangeMapper::getPositionForFactor(double factor) const
-{
-    if (factor == 0) return m_minpos;
-    int pos = int(lrint((log2(factor) + 3.0) * 20.0));
-    if (pos < m_minpos) pos = m_minpos;
-    if (pos > m_maxpos) pos = m_maxpos;
-    return pos;
-}
-
-double
-PlaySpeedRangeMapper::getFactorForPosition(int position) const
-{
-    return pow(2.0, double(position) * 0.05 - 3.0);
-}
-
-QString
-PlaySpeedRangeMapper::getUnit() const
-{
-    return "%";
-}
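
The comment block in the file removed above describes the dial mapping: position = lrint((log2(factor) + 3) * 20) and factor = 2^(position * 0.05 - 3). A standalone sketch (not changeset code) that checks the claim that the "desirable" speed factors land on round dial positions:

    #include <cmath>
    #include <cstdio>

    // Sanity check of the mapping described above; illustrative sketch only.
    int main()
    {
        const double factors[] = { 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0 };
        for (double factor : factors) {
            int position = int(lrint((log2(factor) + 3.0) * 20.0));
            double back = pow(2.0, double(position) * 0.05 - 3.0);
            printf("factor %.3f -> position %3d -> factor %.3f\n",
                   factor, position, back);
        }
        // Prints positions 0, 20, 40, 60, 80, 100, 120 and recovers each factor,
        // so 0.25, 0.5, 1, 2 and 4 map to 20, 40, 60, 80 and 100 as stated.
        return 0;
    }
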
--- a/audioio/PlaySpeedRangeMapper.h	Mon Jul 13 14:39:41 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _PLAY_SPEED_RANGE_MAPPER_H_
-#define _PLAY_SPEED_RANGE_MAPPER_H_
-
-#include "base/RangeMapper.h"
-
-class PlaySpeedRangeMapper : public RangeMapper
-{
-public:
-    PlaySpeedRangeMapper();
-
-    int getMinPosition() const { return m_minpos; }
-    int getMaxPosition() const { return m_maxpos; }
-    
-    virtual int getPositionForValue(double value) const;
-    virtual int getPositionForValueUnclamped(double value) const;
-
-    virtual double getValueForPosition(int position) const;
-    virtual double getValueForPositionUnclamped(int position) const;
-
-    int getPositionForFactor(double factor) const;
-    double getValueForFactor(double factor) const;
-
-    double getFactorForPosition(int position) const;
-    double getFactorForValue(double value) const;
-
-    virtual QString getUnit() const;
-    
-protected:
-    int m_minpos;
-    int m_maxpos;
-};
-
-
-#endif
--- a/configure.ac	Mon Jul 13 14:39:41 2015 +0100
+++ b/configure.ac	Tue Aug 04 13:27:42 2015 +0100
@@ -88,7 +88,7 @@
 SV_MODULE_REQUIRED([rubberband],[rubberband],[rubberband/RubberBandStretcher.h],[rubberband],[rubberband_new])
 
 SV_MODULE_OPTIONAL([liblo],[],[lo/lo.h],[lo],[lo_address_new])
-SV_MODULE_OPTIONAL([portaudio_2_0],[portaudio-2.0 >= 19],[portaudio.h],[portaudio],[Pa_IsFormatSupported])
+SV_MODULE_OPTIONAL([portaudio],[portaudio-2.0 >= 19],[portaudio.h],[portaudio],[Pa_IsFormatSupported])
 SV_MODULE_OPTIONAL([JACK],[jack >= 0.100],[jack/jack.h],[jack],[jack_client_open])
 SV_MODULE_OPTIONAL([libpulse],[libpulse >= 0.9],[pulse/pulseaudio.h],[pulse],[pa_stream_new])
 SV_MODULE_OPTIONAL([lrdf],[lrdf >= 0.2],[lrdf.h],[lrdf],[lrdf_init])
--- a/framework/MainWindowBase.cpp	Mon Jul 13 14:39:41 2015 +0100
+++ b/framework/MainWindowBase.cpp	Tue Aug 04 13:27:42 2015 +0100
@@ -47,10 +47,8 @@
 #include "widgets/ModelDataTableDialog.h"
 #include "widgets/InteractiveFileFinder.h"
 
-#include "audioio/AudioCallbackPlaySource.h"
-#include "audioio/AudioCallbackPlayTarget.h"
-#include "audioio/AudioTargetFactory.h"
-#include "audioio/PlaySpeedRangeMapper.h"
+#include "audio/AudioCallbackPlaySource.h"
+#include "audio/PlaySpeedRangeMapper.h"
 #include "data/fileio/DataFileReaderFactory.h"
 #include "data/fileio/PlaylistFileReader.h"
 #include "data/fileio/WavFileWriter.h"
@@ -75,6 +73,9 @@
 #include "data/osc/OSCQueue.h"
 #include "data/midi/MIDIInput.h"
 
+#include <bqaudioio/SystemPlaybackTarget.h>
+#include <bqaudioio/AudioFactory.h>
+
 #include <QApplication>
 #include <QMessageBox>
 #include <QGridLayout>
@@ -268,8 +269,7 @@
 MainWindowBase::~MainWindowBase()
 {
     SVDEBUG << "MainWindowBase::~MainWindowBase" << endl;
-    if (m_playTarget) m_playTarget->shutdown();
-//    delete m_playTarget;
+    delete m_playTarget;
     delete m_playSource;
     delete m_viewManager;
     delete m_oscQueue;
@@ -2164,24 +2164,31 @@
 {
     if (m_playTarget) return;
 
+    //!!! how to handle preferences
+/*    
     QSettings settings;
     settings.beginGroup("Preferences");
     QString targetName = settings.value("audio-target", "").toString();
     settings.endGroup();
-
     AudioTargetFactory *factory = AudioTargetFactory::getInstance();
 
     factory->setDefaultCallbackTarget(targetName);
-    m_playTarget = factory->createCallbackTarget(m_playSource);
+*/
+    
+    m_playTarget =
+        breakfastquay::AudioFactory::createCallbackPlayTarget(m_playSource);
+
+    m_playSource->setSystemPlaybackTarget(m_playTarget);
 
     if (!m_playTarget) {
         emit hideSplash();
 
-        if (factory->isAutoCallbackTarget(targetName)) {
+//        if (factory->isAutoCallbackTarget(targetName)) {
             QMessageBox::warning
 	    (this, tr("Couldn't open audio device"),
 	     tr("<b>No audio available</b><p>Could not open an audio device for playback.<p>Automatic audio device detection failed. Audio playback will not be available during this session.</p>"),
 	     QMessageBox::Ok);
+/*
         } else {
             QMessageBox::warning
                 (this, tr("Couldn't open audio device"),
@@ -2189,6 +2196,7 @@
                  .arg(factory->getCallbackTargetDescription(targetName)),
                  QMessageBox::Ok);
         }
+*/
     }
 }
 
--- a/framework/MainWindowBase.h	Mon Jul 13 14:39:41 2015 +0100
+++ b/framework/MainWindowBase.h	Tue Aug 04 13:27:42 2015 +0100
@@ -46,7 +46,6 @@
 class WaveformLayer;
 class WaveFileModel;
 class AudioCallbackPlaySource;
-class AudioCallbackPlayTarget;
 class CommandHistory;
 class QMenu;
 class AudioDial;
@@ -63,6 +62,10 @@
 class QSignalMapper;
 class QShortcut;
 
+namespace breakfastquay {
+class SystemPlaybackTarget;
+}
+
 /**
  * The base class for the SV main window.  This includes everything to
  * do with general document and pane stack management, but nothing
@@ -306,7 +309,7 @@
 
     bool                     m_audioOutput;
     AudioCallbackPlaySource *m_playSource;
-    AudioCallbackPlayTarget *m_playTarget;
+    breakfastquay::SystemPlaybackTarget *m_playTarget;
 
     class OSCQueueStarter : public QThread
     {
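
Taken together, the MainWindowBase hunks above replace the old AudioCallbackPlayTarget / AudioTargetFactory pair with a breakfastquay::SystemPlaybackTarget obtained from bqaudioio's AudioFactory. A condensed sketch for orientation only: the enclosing function name is a stand-in, preference handling is still marked as a TODO in the diff, and nothing here is additional changeset code.

    #include <bqaudioio/SystemPlaybackTarget.h>
    #include <bqaudioio/AudioFactory.h>

    // MainWindowBase now holds a breakfastquay::SystemPlaybackTarget *m_playTarget.
    void MainWindowBase::createPlayTarget()   // function name is illustrative
    {
        if (m_playTarget) return;

        m_playTarget =
            breakfastquay::AudioFactory::createCallbackPlayTarget(m_playSource);
        m_playSource->setSystemPlaybackTarget(m_playTarget);

        if (!m_playTarget) {
            // warn the user that no audio device could be opened, as in the hunk
        }
        // ~MainWindowBase() now simply does: delete m_playTarget;
    }
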
--- a/svapp.pro	Mon Jul 13 14:39:41 2015 +0100
+++ b/svapp.pro	Tue Aug 04 13:27:42 2015 +0100
@@ -23,10 +23,10 @@
     }
 
     win* {
-        DEFINES += HAVE_PORTAUDIO_2_0
+        DEFINES += HAVE_PORTAUDIO
     }
     macx* {
-        DEFINES += HAVE_COREAUDIO HAVE_PORTAUDIO_2_0
+        DEFINES += HAVE_COREAUDIO HAVE_PORTAUDIO
     }
 }
 
@@ -35,32 +35,22 @@
 
 TARGET = svapp
 
-DEPENDPATH += . ../svcore ../svgui
-INCLUDEPATH += . ../svcore ../svgui
+DEPENDPATH += . ../bqaudioio ../svcore ../svgui
+INCLUDEPATH += . ../bqaudioio ../svcore ../svgui
 OBJECTS_DIR = o
 MOC_DIR = o
 
-HEADERS += audioio/AudioCallbackPlaySource.h \
-           audioio/AudioCallbackPlayTarget.h \
-           audioio/AudioGenerator.h \
-           audioio/AudioJACKTarget.h \
-           audioio/AudioPortAudioTarget.h \
-           audioio/AudioPulseAudioTarget.h \
-           audioio/AudioTargetFactory.h \
-           audioio/ClipMixer.h \
-           audioio/ContinuousSynth.h \
-           audioio/PlaySpeedRangeMapper.h
+HEADERS += audio/AudioCallbackPlaySource.h \
+           audio/AudioGenerator.h \
+           audio/ClipMixer.h \
+           audio/ContinuousSynth.h \
+           audio/PlaySpeedRangeMapper.h
 
-SOURCES += audioio/AudioCallbackPlaySource.cpp \
-           audioio/AudioCallbackPlayTarget.cpp \
-           audioio/AudioGenerator.cpp \
-           audioio/AudioJACKTarget.cpp \
-           audioio/AudioPortAudioTarget.cpp \
-           audioio/AudioPulseAudioTarget.cpp \
-           audioio/AudioTargetFactory.cpp \
-           audioio/ClipMixer.cpp \
-           audioio/ContinuousSynth.cpp \
-           audioio/PlaySpeedRangeMapper.cpp
+SOURCES += audio/AudioCallbackPlaySource.cpp \
+           audio/AudioGenerator.cpp \
+           audio/ClipMixer.cpp \
+           audio/ContinuousSynth.cpp \
+           audio/PlaySpeedRangeMapper.cpp
 
 HEADERS += framework/Document.h \
            framework/MainWindowBase.h \