changeset 43:3c5756fb6a68

* Move some things around to facilitate plundering libraries for other applications without needing to duplicate so much code. sv/osc -> data/osc sv/audioio -> audioio sv/transform -> plugin/transform sv/document -> document (will rename to framework in next commit)
author Chris Cannam
date Wed, 24 Oct 2007 16:34:31 +0000
parents 0619006a1ee3
children 9ebe12983f3e
files audioio/AudioCallbackPlaySource.cpp audioio/AudioCallbackPlaySource.h audioio/AudioCallbackPlayTarget.cpp audioio/AudioCallbackPlayTarget.h audioio/AudioCoreAudioTarget.cpp audioio/AudioCoreAudioTarget.h audioio/AudioGenerator.cpp audioio/AudioGenerator.h audioio/AudioJACKTarget.cpp audioio/AudioJACKTarget.h audioio/AudioPortAudioTarget.cpp audioio/AudioPortAudioTarget.h audioio/AudioTargetFactory.cpp audioio/AudioTargetFactory.h audioio/PhaseVocoderTimeStretcher.cpp audioio/PhaseVocoderTimeStretcher.h audioio/PlaySpeedRangeMapper.cpp audioio/PlaySpeedRangeMapper.h
diffstat 18 files changed, 4860 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCallbackPlaySource.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,1493 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioCallbackPlaySource.h"
+
+#include "AudioGenerator.h"
+
+#include "data/model/Model.h"
+#include "view/ViewManager.h"
+#include "base/PlayParameterRepository.h"
+#include "base/Preferences.h"
+#include "data/model/DenseTimeValueModel.h"
+#include "data/model/WaveFileModel.h"
+#include "data/model/SparseOneDimensionalModel.h"
+#include "plugin/RealTimePluginInstance.h"
+#include "PhaseVocoderTimeStretcher.h"
+
+#include <iostream>
+#include <cassert>
+
+//#define DEBUG_AUDIO_PLAY_SOURCE 1
+//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1
+
+// Fixed capacity (in sample frames, per channel) of each playback ring
+// buffer.  131071 = 2^17 - 1; the fill thread writes ahead up to this far.
+const size_t AudioCallbackPlaySource::m_ringBufferSize = 131071;
+
+// Constructor.  Registers this object as the ViewManager's audio play
+// source and subscribes to selection, loop-mode, play-selection-mode,
+// play-parameter and preference change signals.  The buffer-fill thread
+// is not created here -- it is started lazily in addModel().
+AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManager *manager) :
+    m_viewManager(manager),
+    m_audioGenerator(new AudioGenerator()),
+    m_readBuffers(0),
+    m_writeBuffers(0),
+    m_readBufferFill(0),
+    m_writeBufferFill(0),
+    m_bufferScavenger(1),
+    m_sourceChannelCount(0),
+    m_blockSize(1024),
+    m_sourceSampleRate(0),
+    m_targetSampleRate(0),
+    m_playLatency(0),
+    m_playing(false),
+    m_exiting(false),
+    m_lastModelEndFrame(0),
+    m_outputLeft(0.0),
+    m_outputRight(0.0),
+    m_auditioningPlugin(0),
+    m_auditioningPluginBypassed(false),
+    m_timeStretcher(0),
+    m_fillThread(0),
+    m_converter(0),
+    m_crapConverter(0),
+    m_resampleQuality(Preferences::getInstance()->getResampleQuality())
+{
+    m_viewManager->setAudioPlaySource(this);
+
+    connect(m_viewManager, SIGNAL(selectionChanged()),
+	    this, SLOT(selectionChanged()));
+    connect(m_viewManager, SIGNAL(playLoopModeChanged()),
+	    this, SLOT(playLoopModeChanged()));
+    connect(m_viewManager, SIGNAL(playSelectionModeChanged()),
+	    this, SLOT(playSelectionModeChanged()));
+
+    connect(PlayParameterRepository::getInstance(),
+	    SIGNAL(playParametersChanged(PlayParameters *)),
+	    this, SLOT(playParametersChanged(PlayParameters *)));
+
+    connect(Preferences::getInstance(),
+            SIGNAL(propertyChanged(PropertyContainer::PropertyName)),
+            this, SLOT(preferenceChanged(PropertyContainer::PropertyName)));
+}
+
+// Destructor.  Flags the fill thread to exit, wakes and joins it, then
+// releases models, buffers and the generator, and finally forces all
+// scavengers to reclaim anything still pending from the audio thread.
+AudioCallbackPlaySource::~AudioCallbackPlaySource()
+{
+    m_exiting = true;
+
+    if (m_fillThread) {
+	m_condition.wakeAll();
+	m_fillThread->wait();
+	delete m_fillThread;
+    }
+
+    clearModels();
+    
+    // m_readBuffers and m_writeBuffers may alias the same vector; only
+    // delete the read set separately when they differ.
+    if (m_readBuffers != m_writeBuffers) {
+	delete m_readBuffers;
+    }
+
+    delete m_writeBuffers;
+
+    delete m_audioGenerator;
+
+    m_bufferScavenger.scavenge(true);
+    m_pluginScavenger.scavenge(true);
+    m_timeStretcherScavenger.scavenge(true);
+}
+
+// Add a model to the set of playable models.  Updates the source
+// channel count and overall end frame, adopts or sanity-checks the
+// model's sample rate, rebuilds or flushes the ring buffers as needed,
+// and starts the fill thread on first use.  Emits sampleRateMismatch
+// if the new rate conflicts with existing wave-file models, and
+// modelReplaced if buffers or rate changed.
+void
+AudioCallbackPlaySource::addModel(Model *model)
+{
+    if (m_models.find(model) != m_models.end()) return;
+
+    bool canPlay = m_audioGenerator->addModel(model);
+
+    m_mutex.lock();
+
+    m_models.insert(model);
+    if (model->getEndFrame() > m_lastModelEndFrame) {
+	m_lastModelEndFrame = model->getEndFrame();
+    }
+
+    bool buffersChanged = false, srChanged = false;
+
+    size_t modelChannels = 1;
+    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
+    if (dtvm) modelChannels = dtvm->getChannelCount();
+    if (modelChannels > m_sourceChannelCount) {
+	m_sourceChannelCount = modelChannels;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "Adding model with " << modelChannels << " channels " << std::endl;
+#endif
+
+    if (m_sourceSampleRate == 0) {
+
+	m_sourceSampleRate = model->getSampleRate();
+	srChanged = true;
+
+    } else if (model->getSampleRate() != m_sourceSampleRate) {
+
+        // If this is a dense time-value model and we have no other, we
+        // can just switch to this model's sample rate
+
+        if (dtvm) {
+
+            bool conflicting = false;
+
+            for (std::set<Model *>::const_iterator i = m_models.begin();
+                 i != m_models.end(); ++i) {
+                // Only wave file models can be considered conflicting --
+                // writable wave file models are derived and we shouldn't
+                // take their rates into account.  Also, don't give any
+                // particular weight to a file that's already playing at
+                // the wrong rate anyway
+                WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i);
+                if (wfm && wfm != dtvm &&
+                    wfm->getSampleRate() != model->getSampleRate() &&
+                    wfm->getSampleRate() == m_sourceSampleRate) {
+                    std::cerr << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << std::endl;
+                    conflicting = true;
+                    break;
+                }
+            }
+
+            if (conflicting) {
+
+                std::cerr << "AudioCallbackPlaySource::addModel: ERROR: "
+                          << "New model sample rate does not match" << std::endl
+                          << "existing model(s) (new " << model->getSampleRate()
+                          << " vs " << m_sourceSampleRate
+                          << "), playback will be wrong"
+                          << std::endl;
+                
+                emit sampleRateMismatch(model->getSampleRate(),
+                                        m_sourceSampleRate,
+                                        false);
+            } else {
+                m_sourceSampleRate = model->getSampleRate();
+                srChanged = true;
+            }
+        }
+    }
+
+    // Rebuild the ring buffers if we now need more channels than they
+    // hold; otherwise just flush them so the new model becomes audible.
+    if (!m_writeBuffers || (m_writeBuffers->size() < getTargetChannelCount())) {
+	clearRingBuffers(true, getTargetChannelCount());
+	buffersChanged = true;
+    } else {
+	if (canPlay) clearRingBuffers(true);
+    }
+
+    if (buffersChanged || srChanged) {
+	if (m_converter) {
+	    src_delete(m_converter);
+            src_delete(m_crapConverter);
+	    m_converter = 0;
+            m_crapConverter = 0;
+	}
+    }
+
+    m_mutex.unlock();
+
+    m_audioGenerator->setTargetChannelCount(getTargetChannelCount());
+
+    // Lazily create and start the buffer-fill thread the first time any
+    // model is added.
+    if (!m_fillThread) {
+	m_fillThread = new FillThread(*this);
+	m_fillThread->start();
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << std::endl;
+#endif
+
+    if (buffersChanged || srChanged) {
+	emit modelReplaced();
+    }
+
+    connect(model, SIGNAL(modelChanged(size_t, size_t)),
+            this, SLOT(modelChanged(size_t, size_t)));
+
+    m_condition.wakeAll();
+}
+
+// Slot: a model's contents changed.  Only extends the overall end
+// frame; shrinkage is recomputed in removeModel() instead.
+void
+AudioCallbackPlaySource::modelChanged(size_t startFrame, size_t endFrame)
+{
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cerr << "AudioCallbackPlaySource::modelChanged(" << startFrame << "," << endFrame << ")" << std::endl;
+#endif
+    if (endFrame > m_lastModelEndFrame) m_lastModelEndFrame = endFrame;
+}
+
+// Remove a model from the playable set.  Tears down the sample-rate
+// converters when the last model goes away, recomputes the overall end
+// frame from the remaining models, then flushes the ring buffers.
+void
+AudioCallbackPlaySource::removeModel(Model *model)
+{
+    m_mutex.lock();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << std::endl;
+#endif
+
+    disconnect(model, SIGNAL(modelChanged(size_t, size_t)),
+               this, SLOT(modelChanged(size_t, size_t)));
+
+    m_models.erase(model);
+
+    if (m_models.empty()) {
+	if (m_converter) {
+	    src_delete(m_converter);
+            src_delete(m_crapConverter);
+	    m_converter = 0;
+            m_crapConverter = 0;
+	}
+	m_sourceSampleRate = 0;
+    }
+
+    // Re-derive the latest end frame now that this model is gone.
+    size_t lastEnd = 0;
+    for (std::set<Model *>::const_iterator i = m_models.begin();
+	 i != m_models.end(); ++i) {
+//	std::cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << std::endl;
+	if ((*i)->getEndFrame() > lastEnd) lastEnd = (*i)->getEndFrame();
+//	std::cout << "(done, lastEnd now " << lastEnd << ")" << std::endl;
+    }
+    m_lastModelEndFrame = lastEnd;
+
+    m_mutex.unlock();
+
+    m_audioGenerator->removeModel(model);
+
+    clearRingBuffers();
+}
+
+// Remove all models and reset derived playback state (converters, end
+// frame, source sample rate).  Also called from the destructor.
+void
+AudioCallbackPlaySource::clearModels()
+{
+    m_mutex.lock();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "AudioCallbackPlaySource::clearModels()" << std::endl;
+#endif
+
+    m_models.clear();
+
+    if (m_converter) {
+	src_delete(m_converter);
+        src_delete(m_crapConverter);
+	m_converter = 0;
+        m_crapConverter = 0;
+    }
+
+    m_lastModelEndFrame = 0;
+
+    m_sourceSampleRate = 0;
+
+    m_mutex.unlock();
+
+    m_audioGenerator->clearModels();
+}    
+
+// Discard and recreate the write-side ring buffers.  count == 0 means
+// "keep the current channel count".  The write fill frame is rewound by
+// however much unread data the read side still holds, so refilling
+// resumes from (approximately) the frame the listener has heard up to.
+// haveLock indicates the caller already holds m_mutex.
+void
+AudioCallbackPlaySource::clearRingBuffers(bool haveLock, size_t count)
+{
+    if (!haveLock) m_mutex.lock();
+
+    if (count == 0) {
+	if (m_writeBuffers) count = m_writeBuffers->size();
+    }
+
+    size_t sf = m_readBufferFill;
+    RingBuffer<float> *rb = getReadRingBuffer(0);
+    if (rb) {
+	//!!! This is incorrect if we're in a non-contiguous selection
+	//Same goes for all related code (subtracting the read space
+	//from the fill frame to try to establish where the effective
+	//pre-resample/timestretch read pointer is)
+	size_t rs = rb->getReadSpace();
+	if (rs < sf) sf -= rs;
+	else sf = 0;
+    }
+    m_writeBufferFill = sf;
+
+    // Never delete buffers the audio thread may still be reading from.
+    if (m_readBuffers != m_writeBuffers) {
+	delete m_writeBuffers;
+    }
+
+    m_writeBuffers = new RingBufferVector;
+
+    for (size_t i = 0; i < count; ++i) {
+	m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize));
+    }
+
+//    std::cout << "AudioCallbackPlaySource::clearRingBuffers: Created "
+//	      << count << " write buffers" << std::endl;
+
+    if (!haveLock) {
+	m_mutex.unlock();
+    }
+}
+
+// Begin (or re-seek) playback from startFrame.  In play-selection mode
+// the start frame is clamped into the selected regions: before the
+// first selection it snaps forward to its start, and past the last
+// selection it wraps back to the first.  Otherwise a start at or past
+// the end of all models wraps to frame 0.  Resets buffers, converters
+// and the generator, then wakes the fill thread.
+void
+AudioCallbackPlaySource::play(size_t startFrame)
+{
+    if (m_viewManager->getPlaySelectionMode() &&
+	!m_viewManager->getSelections().empty()) {
+	MultiSelection::SelectionList selections = m_viewManager->getSelections();
+	MultiSelection::SelectionList::iterator i = selections.begin();
+	if (i != selections.end()) {
+	    if (startFrame < i->getStartFrame()) {
+		startFrame = i->getStartFrame();
+	    } else {
+		MultiSelection::SelectionList::iterator j = selections.end();
+		--j;
+		if (startFrame >= j->getEndFrame()) {
+		    startFrame = i->getStartFrame();
+		}
+	    }
+	}
+    } else {
+	if (startFrame >= m_lastModelEndFrame) {
+	    startFrame = 0;
+	}
+    }
+
+    // The fill thread will automatically empty its buffers before
+    // starting again if we have not so far been playing, but not if
+    // we're just re-seeking.
+
+    m_mutex.lock();
+    if (m_playing) {
+	m_readBufferFill = m_writeBufferFill = startFrame;
+	if (m_readBuffers) {
+	    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+		RingBuffer<float> *rb = getReadRingBuffer(c);
+		if (rb) rb->reset();
+	    }
+	}
+	if (m_converter) src_reset(m_converter);
+        if (m_crapConverter) src_reset(m_crapConverter);
+    } else {
+	if (m_converter) src_reset(m_converter);
+        if (m_crapConverter) src_reset(m_crapConverter);
+	m_readBufferFill = m_writeBufferFill = startFrame;
+    }
+    m_mutex.unlock();
+
+    m_audioGenerator->reset();
+
+    bool changed = !m_playing;
+    m_playing = true;
+    m_condition.wakeAll();
+    if (changed) emit playStatusChanged(m_playing);
+}
+
+// Stop playback.  Wakes the fill thread (so it can notice the state
+// change) and emits playStatusChanged only on an actual transition.
+void
+AudioCallbackPlaySource::stop()
+{
+    bool changed = m_playing;
+    m_playing = false;
+    m_condition.wakeAll();
+    if (changed) emit playStatusChanged(m_playing);
+}
+
+// Slot: the selection changed.  Only relevant when playback is
+// constrained to the selection, in which case buffered audio is stale.
+void
+AudioCallbackPlaySource::selectionChanged()
+{
+    if (m_viewManager->getPlaySelectionMode()) {
+	clearRingBuffers();
+    }
+}
+
+// Slot: loop mode toggled -- flush buffered audio so the new mode takes
+// effect promptly.
+void
+AudioCallbackPlaySource::playLoopModeChanged()
+{
+    clearRingBuffers();
+}
+
+// Slot: play-selection mode toggled.  A flush is only needed if there
+// is actually a selection to constrain to.
+void
+AudioCallbackPlaySource::playSelectionModeChanged()
+{
+    if (!m_viewManager->getSelections().empty()) {
+	clearRingBuffers();
+    }
+}
+
+// Slot: some model's play parameters (e.g. mute/pan/gain) changed --
+// already-rendered audio is stale, so flush it.
+void
+AudioCallbackPlaySource::playParametersChanged(PlayParameters *)
+{
+    clearRingBuffers();
+}
+
+// Slot: an application preference changed; we only react to the
+// resample-quality setting (matched by its property name string).
+void
+AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n)
+{
+    if (n == "Resample Quality") {
+        setResampleQuality(Preferences::getInstance()->getResampleQuality());
+    }
+}
+
+// Called when the audio callback reports it cannot keep up.  If an
+// auditioning effect plugin is active, bypass it (it is the most likely
+// CPU hog) and notify the UI via audioOverloadPluginDisabled().
+void
+AudioCallbackPlaySource::audioProcessingOverload()
+{
+    RealTimePluginInstance *ap = m_auditioningPlugin;
+    if (ap && m_playing && !m_auditioningPluginBypassed) {
+        m_auditioningPluginBypassed = true;
+        emit audioOverloadPluginDisabled();
+    }
+}
+
+// Record the audio callback's block size (frames per callback).  Must
+// be smaller than the ring buffer capacity, hence the assert.
+void
+AudioCallbackPlaySource::setTargetBlockSize(size_t size)
+{
+//    std::cout << "AudioCallbackPlaySource::setTargetBlockSize() -> " << size << std::endl;
+    assert(size < m_ringBufferSize);
+    m_blockSize = size;
+}
+
+// Return the audio callback block size last set via setTargetBlockSize
+// (defaults to 1024).
+size_t
+AudioCallbackPlaySource::getTargetBlockSize() const
+{
+//    std::cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << std::endl;
+    return m_blockSize;
+}
+
+// Record the playback target's output latency, in frames at the target
+// sample rate; used by getCurrentPlayingFrame().
+void
+AudioCallbackPlaySource::setTargetPlayLatency(size_t latency)
+{
+    m_playLatency = latency;
+}
+
+// Return the playback latency last set via setTargetPlayLatency().
+size_t
+AudioCallbackPlaySource::getTargetPlayLatency() const
+{
+    return m_playLatency;
+}
+
+// Estimate the source frame currently being heard.  Starts from the
+// read-buffer fill frame and subtracts everything still in flight:
+// unread ring-buffer data, output latency (both scaled by the resample
+// ratio where applicable) and time-stretcher latency.  In constrained
+// (play-selection) mode the subtraction is walked backwards through the
+// selection regions.  May call stop() as a side effect on reaching the
+// end of playback.
+size_t
+AudioCallbackPlaySource::getCurrentPlayingFrame()
+{
+    bool resample = false;
+    double ratio = 1.0;
+
+    if (getSourceSampleRate() != getTargetSampleRate()) {
+	resample = true;
+	ratio = double(getSourceSampleRate()) / double(getTargetSampleRate());
+    }
+
+    // Use the smallest read space across channels as the in-flight count.
+    size_t readSpace = 0;
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *rb = getReadRingBuffer(c);
+	if (rb) {
+	    size_t spaceHere = rb->getReadSpace();
+	    if (c == 0 || spaceHere < readSpace) readSpace = spaceHere;
+	}
+    }
+
+    if (resample) {
+	readSpace = size_t(readSpace * ratio + 0.1);
+    }
+
+    size_t latency = m_playLatency;
+    if (resample) latency = size_t(m_playLatency * ratio + 0.1);
+
+    PhaseVocoderTimeStretcher *timeStretcher = m_timeStretcher;
+    if (timeStretcher) {
+	latency += timeStretcher->getProcessingLatency();
+    }
+
+    latency += readSpace;
+    size_t bufferedFrame = m_readBufferFill;
+
+    bool looping = m_viewManager->getPlayLoopMode();
+    bool constrained = (m_viewManager->getPlaySelectionMode() &&
+			!m_viewManager->getSelections().empty());
+
+    size_t framePlaying = bufferedFrame;
+
+    if (looping && !constrained) {
+	while (framePlaying < latency) framePlaying += m_lastModelEndFrame;
+    }
+
+    if (framePlaying > latency) framePlaying -= latency;
+    else framePlaying = 0;
+
+    if (!constrained) {
+	if (!looping && framePlaying > m_lastModelEndFrame) {
+	    framePlaying = m_lastModelEndFrame;
+	    stop();
+	}
+	return framePlaying;
+    }
+
+    // Constrained mode: walk the latency back through the selections.
+    MultiSelection::SelectionList selections = m_viewManager->getSelections();
+    MultiSelection::SelectionList::const_iterator i;
+
+//    i = selections.begin();
+//    size_t rangeStart = i->getStartFrame();
+
+    i = selections.end();
+    --i;
+    size_t rangeEnd = i->getEndFrame();
+
+    for (i = selections.begin(); i != selections.end(); ++i) {
+	if (i->contains(bufferedFrame)) break;
+    }
+
+    size_t f = bufferedFrame;
+
+//    std::cout << "getCurrentPlayingFrame: f=" << f << ", latency=" << latency << ", rangeEnd=" << rangeEnd << std::endl;
+
+    if (i == selections.end()) {
+	--i;
+	if (i->getEndFrame() + latency < f) {
+//    std::cout << "framePlaying = " << framePlaying << ", rangeEnd = " << rangeEnd << std::endl;
+
+	    if (!looping && (framePlaying > rangeEnd)) {
+//		std::cout << "STOPPING" << std::endl;
+		stop();
+		return rangeEnd;
+	    } else {
+		return framePlaying;
+	    }
+	} else {
+//	    std::cout << "latency <- " << latency << "-(" << f << "-" << i->getEndFrame() << ")" << std::endl;
+	    latency -= (f - i->getEndFrame());
+	    f = i->getEndFrame();
+	}
+    }
+
+//    std::cout << "i=(" << i->getStartFrame() << "," << i->getEndFrame() << ") f=" << f << ", latency=" << latency << std::endl;
+
+    while (latency > 0) {
+	size_t offset = f - i->getStartFrame();
+	if (offset >= latency) {
+	    if (f > latency) {
+		framePlaying = f - latency;
+	    } else {
+		framePlaying = 0;
+	    }
+	    break;
+	} else {
+	    // NOTE(review): when i == selections.begin() and looping is
+	    // false, the --i below decrements begin(), which is undefined
+	    // behaviour for std container iterators -- confirm this case
+	    // cannot be reached, or guard it.
+	    if (i == selections.begin()) {
+		if (looping) {
+		    i = selections.end();
+		}
+	    }
+	    latency -= offset;
+	    --i;
+	    f = i->getEndFrame();
+	}
+    }
+
+    return framePlaying;
+}
+
+// Called by the audio target to report the most recent output peak
+// levels (for level metering in the UI).
+void
+AudioCallbackPlaySource::setOutputLevels(float left, float right)
+{
+    m_outputLeft = left;
+    m_outputRight = right;
+}
+
+// Retrieve the most recently reported output levels.  Always succeeds.
+bool
+AudioCallbackPlaySource::getOutputLevels(float &left, float &right)
+{
+    left = m_outputLeft;
+    right = m_outputRight;
+    return true;
+}
+
+// Record the audio device's sample rate and rebuild the sample-rate
+// converters accordingly.
+void
+AudioCallbackPlaySource::setTargetSampleRate(size_t sr)
+{
+    m_targetSampleRate = sr;
+    initialiseConverter();
+}
+
+// (Re)create the libsamplerate converters.  When source and target
+// rates differ, builds both a quality converter (SINC variant chosen
+// from m_resampleQuality) and a cheap SRC_LINEAR fallback used for
+// extreme time-stretch ratios.  The mutex is released before any
+// sampleRateMismatch emission so connected slots cannot deadlock on it.
+void
+AudioCallbackPlaySource::initialiseConverter()
+{
+    m_mutex.lock();
+
+    if (m_converter) {
+        src_delete(m_converter);
+        src_delete(m_crapConverter);
+        m_converter = 0;
+        m_crapConverter = 0;
+    }
+
+    if (getSourceSampleRate() != getTargetSampleRate()) {
+
+	int err = 0;
+
+	m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY :
+                              m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY :
+                              m_resampleQuality == 0 ? SRC_SINC_FASTEST :
+                                                       SRC_SINC_MEDIUM_QUALITY,
+			      getTargetChannelCount(), &err);
+
+        if (m_converter) {
+            m_crapConverter = src_new(SRC_LINEAR,
+                                      getTargetChannelCount(),
+                                      &err);
+        }
+
+	if (!m_converter || !m_crapConverter) {
+	    std::cerr
+		<< "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: "
+		<< src_strerror(err) << std::endl;
+
+            if (m_converter) {
+                src_delete(m_converter);
+                m_converter = 0;
+            } 
+
+            if (m_crapConverter) {
+                src_delete(m_crapConverter);
+                m_crapConverter = 0;
+            }
+
+            m_mutex.unlock();
+
+            // willResample=false: we cannot resample, playback will be at
+            // the wrong rate.
+            emit sampleRateMismatch(getSourceSampleRate(),
+                                    getTargetSampleRate(),
+                                    false);
+	} else {
+
+            m_mutex.unlock();
+
+            emit sampleRateMismatch(getSourceSampleRate(),
+                                    getTargetSampleRate(),
+                                    true);
+        }
+    } else {
+        m_mutex.unlock();
+    }
+}
+
+// Change the resampler quality setting (0=fastest, 1=medium, 2=best)
+// and rebuild the converters.  No-op if unchanged.
+void
+AudioCallbackPlaySource::setResampleQuality(int q)
+{
+    if (q == m_resampleQuality) return;
+    m_resampleQuality = q;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cerr << "AudioCallbackPlaySource::setResampleQuality: setting to "
+              << m_resampleQuality << std::endl;
+#endif
+
+    initialiseConverter();
+}
+
+// Install (or clear, with plugin == 0) the effect plugin auditioned on
+// the output.  The previous plugin is handed to the scavenger rather
+// than deleted, since the audio thread may still be calling into it.
+void
+AudioCallbackPlaySource::setAuditioningPlugin(RealTimePluginInstance *plugin)
+{
+    RealTimePluginInstance *formerPlugin = m_auditioningPlugin;
+    m_auditioningPlugin = plugin;
+    m_auditioningPluginBypassed = false;
+    if (formerPlugin) m_pluginScavenger.claim(formerPlugin);
+}
+
+// Restrict audible output to the given set of models (solo), then flush
+// buffers so the change is heard immediately.
+void
+AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
+{
+    m_audioGenerator->setSoloModelSet(s);
+    clearRingBuffers();
+}
+
+// Cancel solo mode and flush buffers so all models are heard again.
+void
+AudioCallbackPlaySource::clearSoloModelSet()
+{
+    m_audioGenerator->clearSoloModelSet();
+    clearRingBuffers();
+}
+
+// Device sample rate if one has been set, otherwise fall back to the
+// source rate (i.e. no resampling).
+size_t
+AudioCallbackPlaySource::getTargetSampleRate() const
+{
+    if (m_targetSampleRate) return m_targetSampleRate;
+    else return getSourceSampleRate();
+}
+
+// Widest channel count among the added models (0 if none added yet).
+size_t
+AudioCallbackPlaySource::getSourceChannelCount() const
+{
+    return m_sourceChannelCount;
+}
+
+// Channel count we present to the audio device: at least stereo, even
+// for mono or not-yet-known sources.
+size_t
+AudioCallbackPlaySource::getTargetChannelCount() const
+{
+    if (m_sourceChannelCount < 2) return 2;
+    return m_sourceChannelCount;
+}
+
+// Common sample rate of the source models (0 until a model is added).
+size_t
+AudioCallbackPlaySource::getSourceSampleRate() const
+{
+    return m_sourceSampleRate;
+}
+
+// Set the playback speed ratio (1 = normal), with optional transient
+// sharpening and mono processing.  Factor 1 removes the stretcher.
+// If only the ratio differs on a compatible existing stretcher, it is
+// adjusted in place; otherwise a replacement is created and the old one
+// handed to the scavenger, avoiding any locking against the audio thread.
+void
+AudioCallbackPlaySource::setTimeStretch(float factor, bool sharpen, bool mono)
+{
+    // Avoid locks -- create, assign, mark old one for scavenging
+    // later (as a call to getSourceSamples may still be using it)
+
+    PhaseVocoderTimeStretcher *existingStretcher = m_timeStretcher;
+
+    size_t channels = getTargetChannelCount();
+    if (mono) channels = 1;
+
+    if (existingStretcher &&
+        existingStretcher->getRatio() == factor &&
+        existingStretcher->getSharpening() == sharpen &&
+        existingStretcher->getChannelCount() == channels) {
+	return;
+    }
+
+    if (factor != 1) {
+
+        // Same configuration apart from the ratio: adjust in place.
+        if (existingStretcher &&
+            existingStretcher->getSharpening() == sharpen &&
+            existingStretcher->getChannelCount() == channels) {
+            existingStretcher->setRatio(factor);
+            return;
+        }
+
+	PhaseVocoderTimeStretcher *newStretcher = new PhaseVocoderTimeStretcher
+	    (getTargetSampleRate(),
+             channels,
+             factor,
+             sharpen,
+             getTargetBlockSize());
+
+	m_timeStretcher = newStretcher;
+
+    } else {
+	m_timeStretcher = 0;
+    }
+
+    if (existingStretcher) {
+	m_timeStretcherScavenger.claim(existingStretcher);
+    }
+}
+
+// Audio-callback entry point: fill `buffer` (one array per target
+// channel) with up to `count` frames from the read ring buffers,
+// passing them through the time stretcher and auditioning effect if
+// active.  Emits silence when stopped.  Returns the number of frames
+// actually produced, and wakes the fill thread to top the buffers up.
+size_t
+AudioCallbackPlaySource::getSourceSamples(size_t count, float **buffer)
+{
+    if (!m_playing) {
+	for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
+	    for (size_t i = 0; i < count; ++i) {
+		buffer[ch][i] = 0.0;
+	    }
+	}
+	return 0;
+    }
+
+    // Ensure that all buffers have at least the amount of data we
+    // need -- else reduce the size of our requests correspondingly
+
+    for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
+
+        RingBuffer<float> *rb = getReadRingBuffer(ch);
+        
+        if (!rb) {
+            std::cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
+                      << "No ring buffer available for channel " << ch
+                      << ", returning no data here" << std::endl;
+            count = 0;
+            break;
+        }
+
+        size_t rs = rb->getReadSpace();
+        if (rs < count) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            std::cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
+                      << "Ring buffer for channel " << ch << " has only "
+                      << rs << " (of " << count << ") samples available, "
+                      << "reducing request size" << std::endl;
+#endif
+            count = rs;
+        }
+    }
+
+    if (count == 0) return 0;
+
+    PhaseVocoderTimeStretcher *ts = m_timeStretcher;
+
+    // Fast path: no time stretch, read straight from the ring buffers.
+    if (!ts || ts->getRatio() == 1) {
+
+	size_t got = 0;
+
+	for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
+
+	    RingBuffer<float> *rb = getReadRingBuffer(ch);
+
+	    if (rb) {
+
+		// this is marginally more likely to leave our channels in
+		// sync after a processing failure than just passing "count":
+		size_t request = count;
+		if (ch > 0) request = got;
+
+		got = rb->read(buffer[ch], request);
+	    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+		std::cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << std::endl;
+#endif
+	    }
+
+	    // NOTE(review): this inner loop re-declares `ch`, shadowing the
+	    // outer loop variable, and zero-pads the tail of every channel
+	    // on each outer iteration -- including channels not yet read.
+	    // It looks as if it was intended to run once, after the outer
+	    // loop; confirm before relying on the padding behaviour.
+	    for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
+		for (size_t i = got; i < count; ++i) {
+		    buffer[ch][i] = 0.0;
+		}
+	    }
+	}
+
+        applyAuditioningEffect(count, buffer);
+
+        m_condition.wakeAll();
+	return got;
+    }
+
+    float ratio = ts->getRatio();
+
+//            std::cout << "ratio = " << ratio << std::endl;
+
+    size_t channels = getTargetChannelCount();
+    bool mix = (channels > 1 && ts->getChannelCount() == 1);
+
+    size_t available;
+
+    int warned = 0;
+
+    // We want output blocks of e.g. 1024 (probably fixed, certainly
+    // bounded).  We can provide input blocks of any size (unbounded)
+    // at the timestretcher's request.  The input block for a given
+    // output is approx output / ratio, but we can't predict it
+    // exactly, for an adaptive timestretcher.  The stretcher will
+    // need some additional buffer space.  See the time stretcher code
+    // and comments.
+
+    while ((available = ts->getAvailableOutputSamples()) < count) {
+
+        size_t reqd = lrintf((count - available) / ratio);
+        reqd = std::max(reqd, ts->getRequiredInputSamples());
+        if (reqd == 0) reqd = 1;
+                
+        // NOTE(review): variable-length array -- a compiler extension,
+        // not standard C++.
+        float *ib[channels];
+
+        size_t got = reqd;
+
+        if (mix) {
+            // Mono stretcher with multi-channel source: sum all channels
+            // into a single input buffer.
+            for (size_t c = 0; c < channels; ++c) {
+                if (c == 0) ib[c] = new float[reqd]; //!!! fix -- this is a rt function
+                else ib[c] = 0;
+                RingBuffer<float> *rb = getReadRingBuffer(c);
+                if (rb) {
+                    size_t gotHere;
+                    if (c > 0) gotHere = rb->readAdding(ib[0], got);
+                    else gotHere = rb->read(ib[0], got);
+                    if (gotHere < got) got = gotHere;
+                }
+            }
+        } else {
+            for (size_t c = 0; c < channels; ++c) {
+                ib[c] = new float[reqd]; //!!! fix -- this is a rt function
+                RingBuffer<float> *rb = getReadRingBuffer(c);
+                if (rb) {
+                    size_t gotHere = rb->read(ib[c], got);
+                    if (gotHere < got) got = gotHere;
+                }
+            }
+        }
+
+        if (got < reqd) {
+            std::cerr << "WARNING: Read underrun in playback ("
+                      << got << " < " << reqd << ")" << std::endl;
+        }
+                
+        ts->putInput(ib, got);
+
+        for (size_t c = 0; c < channels; ++c) {
+            delete[] ib[c];
+        }
+
+        if (got == 0) break;
+
+        if (ts->getAvailableOutputSamples() == available) {
+            std::cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << std::endl;
+            if (++warned == 5) break;
+        }
+    }
+
+    ts->getOutput(buffer, count);
+
+    // Mono stretcher output: replicate channel 0 to all channels,
+    // attenuated to compensate for the earlier summing.
+    if (mix) {
+        for (size_t c = 1; c < channels; ++c) {
+            for (size_t i = 0; i < count; ++i) {
+                buffer[c][i] = buffer[0][i] / channels;
+            }
+        }
+        for (size_t i = 0; i < count; ++i) {
+            buffer[0][i] /= channels;
+        }
+    }
+
+    applyAuditioningEffect(count, buffer);
+
+    m_condition.wakeAll();
+
+    return count;
+}
+
+// Run the auditioning effect plugin in place over `buffers`.  Silently
+// skips (leaving the audio untouched) if bypassed, absent, or if the
+// plugin's channel counts or buffer size don't match this block --
+// mismatches are tolerated rather than treated as errors.
+void
+AudioCallbackPlaySource::applyAuditioningEffect(size_t count, float **buffers)
+{
+    if (m_auditioningPluginBypassed) return;
+    RealTimePluginInstance *plugin = m_auditioningPlugin;
+    if (!plugin) return;
+
+    if (plugin->getAudioInputCount() != getTargetChannelCount()) {
+//        std::cerr << "plugin input count " << plugin->getAudioInputCount() 
+//                  << " != our channel count " << getTargetChannelCount()
+//                  << std::endl;
+        return;
+    }
+    if (plugin->getAudioOutputCount() != getTargetChannelCount()) {
+//        std::cerr << "plugin output count " << plugin->getAudioOutputCount() 
+//                  << " != our channel count " << getTargetChannelCount()
+//                  << std::endl;
+        return;
+    }
+    if (plugin->getBufferSize() != count) {
+//        std::cerr << "plugin buffer size " << plugin->getBufferSize() 
+//                  << " != our block size " << count
+//                  << std::endl;
+        return;
+    }
+
+    // Copy in, run, copy out -- the plugin owns its own I/O buffers.
+    float **ib = plugin->getAudioInputBuffers();
+    float **ob = plugin->getAudioOutputBuffers();
+
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+        for (size_t i = 0; i < count; ++i) {
+            ib[c][i] = buffers[c][i];
+        }
+    }
+
+    plugin->run(Vamp::RealTime::zeroTime);
+    
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+        for (size_t i = 0; i < count; ++i) {
+            buffers[c][i] = ob[c][i];
+        }
+    }
+}    
+
+// Called from fill thread, m_playing true, mutex held
+bool
+AudioCallbackPlaySource::fillBuffers()
+{
+    static float *tmp = 0;
+    static size_t tmpSize = 0;
+
+    size_t space = 0;
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    size_t spaceHere = wb->getWriteSpace();
+	    if (c == 0 || spaceHere < space) space = spaceHere;
+	}
+    }
+    
+    if (space == 0) return false;
+
+    size_t f = m_writeBufferFill;
+	
+    bool readWriteEqual = (m_readBuffers == m_writeBuffers);
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << std::endl;
+#endif
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "buffered to " << f << " already" << std::endl;
+#endif
+
+    bool resample = (getSourceSampleRate() != getTargetSampleRate());
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << std::endl;
+#endif
+
+    size_t channels = getTargetChannelCount();
+
+    size_t orig = space;
+    size_t got = 0;
+
+    static float **bufferPtrs = 0;
+    static size_t bufferPtrCount = 0;
+
+    if (bufferPtrCount < channels) {
+	if (bufferPtrs) delete[] bufferPtrs;
+	bufferPtrs = new float *[channels];
+	bufferPtrCount = channels;
+    }
+
+    size_t generatorBlockSize = m_audioGenerator->getBlockSize();
+
+    if (resample && !m_converter) {
+	static bool warned = false;
+	if (!warned) {
+	    std::cerr << "WARNING: sample rates differ, but no converter available!" << std::endl;
+	    warned = true;
+	}
+    }
+
+    if (resample && m_converter) {
+
+	double ratio =
+	    double(getTargetSampleRate()) / double(getSourceSampleRate());
+	orig = size_t(orig / ratio + 0.1);
+
+	// orig must be a multiple of generatorBlockSize
+	orig = (orig / generatorBlockSize) * generatorBlockSize;
+	if (orig == 0) return false;
+
+	size_t work = std::max(orig, space);
+
+	// We only allocate one buffer, but we use it in two halves.
+	// We place the non-interleaved values in the second half of
+	// the buffer (orig samples for channel 0, orig samples for
+	// channel 1 etc), and then interleave them into the first
+	// half of the buffer.  Then we resample back into the second
+	// half (interleaved) and de-interleave the results back to
+	// the start of the buffer for insertion into the ringbuffers.
+	// What a faff -- especially as we've already de-interleaved
+	// the audio data from the source file elsewhere before we
+	// even reach this point.
+	
+	if (tmpSize < channels * work * 2) {
+	    delete[] tmp;
+	    tmp = new float[channels * work * 2];
+	    tmpSize = channels * work * 2;
+	}
+
+	float *nonintlv = tmp + channels * work;
+	float *intlv = tmp;
+	float *srcout = tmp + channels * work;
+	
+	for (size_t c = 0; c < channels; ++c) {
+	    for (size_t i = 0; i < orig; ++i) {
+		nonintlv[channels * i + c] = 0.0f;
+	    }
+	}
+
+	for (size_t c = 0; c < channels; ++c) {
+	    bufferPtrs[c] = nonintlv + c * orig;
+	}
+
+	got = mixModels(f, orig, bufferPtrs);
+
+	// and interleave into first half
+	for (size_t c = 0; c < channels; ++c) {
+	    for (size_t i = 0; i < got; ++i) {
+		float sample = nonintlv[c * got + i];
+		intlv[channels * i + c] = sample;
+	    }
+	}
+		
+	SRC_DATA data;
+	data.data_in = intlv;
+	data.data_out = srcout;
+	data.input_frames = got;
+	data.output_frames = work;
+	data.src_ratio = ratio;
+	data.end_of_input = 0;
+	
+	int err = 0;
+
+        if (m_timeStretcher && m_timeStretcher->getRatio() < 0.4) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            std::cout << "Using crappy converter" << std::endl;
+#endif
+            err = src_process(m_crapConverter, &data);
+        } else {
+            err = src_process(m_converter, &data);
+        }
+
+	size_t toCopy = size_t(got * ratio + 0.1);
+
+	if (err) {
+	    std::cerr
+		<< "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
+		<< src_strerror(err) << std::endl;
+	    //!!! Then what?
+	} else {
+	    got = data.input_frames_used;
+	    toCopy = data.output_frames_gen;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    std::cout << "Resampled " << got << " frames to " << toCopy << " frames" << std::endl;
+#endif
+	}
+	
+	for (size_t c = 0; c < channels; ++c) {
+	    for (size_t i = 0; i < toCopy; ++i) {
+		tmp[i] = srcout[channels * i + c];
+	    }
+	    RingBuffer<float> *wb = getWriteRingBuffer(c);
+	    if (wb) wb->write(tmp, toCopy);
+	}
+
+	m_writeBufferFill = f;
+	if (readWriteEqual) m_readBufferFill = f;
+
+    } else {
+
+	// space must be a multiple of generatorBlockSize
+	space = (space / generatorBlockSize) * generatorBlockSize;
+	if (space == 0) return false;
+
+	if (tmpSize < channels * space) {
+	    delete[] tmp;
+	    tmp = new float[channels * space];
+	    tmpSize = channels * space;
+	}
+
+	for (size_t c = 0; c < channels; ++c) {
+
+	    bufferPtrs[c] = tmp + c * space;
+	    
+	    for (size_t i = 0; i < space; ++i) {
+		tmp[c * space + i] = 0.0f;
+	    }
+	}
+
+	size_t got = mixModels(f, space, bufferPtrs);
+
+	for (size_t c = 0; c < channels; ++c) {
+
+	    RingBuffer<float> *wb = getWriteRingBuffer(c);
+	    if (wb) {
+                size_t actual = wb->write(bufferPtrs[c], got);
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+		std::cout << "Wrote " << actual << " samples for ch " << c << ", now "
+			  << wb->getReadSpace() << " to read" 
+			  << std::endl;
+#endif
+                if (actual < got) {
+                    std::cerr << "WARNING: Buffer overrun in channel " << c
+                              << ": wrote " << actual << " of " << got
+                              << " samples" << std::endl;
+                }
+            }
+	}
+
+	m_writeBufferFill = f;
+	if (readWriteEqual) m_readBufferFill = f;
+
+	//!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
+    }
+
+    return true;
+}    
+
+// Mix audio from all playable models into the supplied non-interleaved
+// per-channel buffers.  Honours play-loop mode and play-selection
+// constraints, applying short fades at selection boundaries to avoid
+// clicks.  Updates "frame" to the new buffered frame position (which may
+// be earlier than the frame passed in, when looping) and returns the
+// number of frames written, which is count or fewer.
+size_t
+AudioCallbackPlaySource::mixModels(size_t &frame, size_t count, float **buffers)
+{
+    size_t processed = 0;
+    size_t chunkStart = frame;
+    size_t chunkSize = count;
+    size_t selectionSize = 0;
+    size_t nextChunkStart = chunkStart + chunkSize;
+    
+    bool looping = m_viewManager->getPlayLoopMode();
+    bool constrained = (m_viewManager->getPlaySelectionMode() &&
+			!m_viewManager->getSelections().empty());
+
+    // Scratch array of per-channel write cursors, grown on demand.
+    // NOTE(review): function-local statics assume this is only ever
+    // called from the single fill thread -- confirm no other callers.
+    static float **chunkBufferPtrs = 0;
+    static size_t chunkBufferPtrCount = 0;
+    size_t channels = getTargetChannelCount();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << std::endl;
+#endif
+
+    if (chunkBufferPtrCount < channels) {
+	if (chunkBufferPtrs) delete[] chunkBufferPtrs;
+	chunkBufferPtrs = new float *[channels];
+	chunkBufferPtrCount = channels;
+    }
+
+    for (size_t c = 0; c < channels; ++c) {
+	chunkBufferPtrs[c] = buffers[c];
+    }
+
+    while (processed < count) {
+	
+	chunkSize = count - processed;
+	nextChunkStart = chunkStart + chunkSize;
+	selectionSize = 0;
+
+	// Fade lengths in frames: 50 gives a short declicking ramp at
+	// selection edges (shortened below for very small selections).
+	size_t fadeIn = 0, fadeOut = 0;
+
+	if (constrained) {
+	    
+	    // Clip this chunk to the selection containing chunkStart,
+	    // or (when looping) wrap round to the first selection.
+	    Selection selection =
+		m_viewManager->getContainingSelection(chunkStart, true);
+	    
+	    if (selection.isEmpty()) {
+		if (looping) {
+		    selection = *m_viewManager->getSelections().begin();
+		    chunkStart = selection.getStartFrame();
+		    fadeIn = 50;
+		}
+	    }
+
+	    if (selection.isEmpty()) {
+
+		// Nothing left to play: emit a zero-size chunk so the
+		// early-exit branch below is taken.
+		chunkSize = 0;
+		nextChunkStart = chunkStart;
+
+	    } else {
+
+		selectionSize =
+		    selection.getEndFrame() -
+		    selection.getStartFrame();
+
+		if (chunkStart < selection.getStartFrame()) {
+		    chunkStart = selection.getStartFrame();
+		    fadeIn = 50;
+		}
+
+		nextChunkStart = chunkStart + chunkSize;
+
+		if (nextChunkStart >= selection.getEndFrame()) {
+		    nextChunkStart = selection.getEndFrame();
+		    fadeOut = 50;
+		}
+
+		chunkSize = nextChunkStart - chunkStart;
+	    }
+	
+	} else if (looping && m_lastModelEndFrame > 0) {
+
+	    // Unconstrained looping: wrap at the end of the longest model.
+	    if (chunkStart >= m_lastModelEndFrame) {
+		chunkStart = 0;
+	    }
+	    if (chunkSize > m_lastModelEndFrame - chunkStart) {
+		chunkSize = m_lastModelEndFrame - chunkStart;
+	    }
+	    nextChunkStart = chunkStart + chunkSize;
+	}
+	
+//	std::cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << std::endl;
+
+	if (!chunkSize) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    std::cout << "Ending selection playback at " << nextChunkStart << std::endl;
+#endif
+	    // We need to maintain full buffers so that the other
+	    // thread can tell where it's got to in the playback -- so
+	    // return the full amount here
+	    frame = frame + count;
+	    return count;
+	}
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	std::cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << std::endl;
+#endif
+
+	size_t got = 0;
+
+	// Suppress or shorten the fades for very short selections, where
+	// a 50-frame ramp would swallow most of the audible material.
+	if (selectionSize < 100) {
+	    fadeIn = 0;
+	    fadeOut = 0;
+	} else if (selectionSize < 300) {
+	    if (fadeIn > 0) fadeIn = 10;
+	    if (fadeOut > 0) fadeOut = 10;
+	}
+
+	// Clamp the fades so they fit within the part of this request
+	// that precedes (fadeIn) or follows (fadeOut) the chunk.
+	if (fadeIn > 0) {
+	    if (processed * 2 < fadeIn) {
+		fadeIn = processed * 2;
+	    }
+	}
+
+	if (fadeOut > 0) {
+	    if ((count - processed - chunkSize) * 2 < fadeOut) {
+		fadeOut = (count - processed - chunkSize) * 2;
+	    }
+	}
+
+	// Mix every model additively into the same chunk of the output.
+	// NOTE(review): "got" keeps only the last model's return value;
+	// presumably every model yields the same frame count -- confirm
+	// against AudioGenerator::mixModel.
+	for (std::set<Model *>::iterator mi = m_models.begin();
+	     mi != m_models.end(); ++mi) {
+	    
+	    got = m_audioGenerator->mixModel(*mi, chunkStart, 
+					     chunkSize, chunkBufferPtrs,
+					     fadeIn, fadeOut);
+	}
+
+	for (size_t c = 0; c < channels; ++c) {
+	    chunkBufferPtrs[c] += chunkSize;
+	}
+
+	processed += chunkSize;
+	chunkStart = nextChunkStart;
+    }
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << std::endl;
+#endif
+
+    frame = nextChunkStart;
+    return processed;
+}
+
+// Switch the read side over to the write-side ring buffers when the two
+// sets differ (presumably after clearRingBuffers has reallocated the
+// write side -- not visible in this chunk).  Before switching, the write
+// buffers are skipped forward so that their read position corresponds to
+// the frame the old read buffers had reached; the old set is handed to
+// the scavenger for deferred deletion.
+void
+AudioCallbackPlaySource::unifyRingBuffers()
+{
+    if (m_readBuffers == m_writeBuffers) return;
+
+    // only unify if there will be something to read
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    if (wb->getReadSpace() < m_blockSize * 2) {
+		if ((m_writeBufferFill + m_blockSize * 2) < 
+		    m_lastModelEndFrame) {
+		    // OK, we don't have enough and there's more to
+		    // read -- don't unify until we can do better
+		    return;
+		}
+	    }
+	    break;
+	}
+    }
+
+    // rf = source frame at the current read pointer of the old buffers
+    // (fill position minus whatever is still waiting to be read).
+    size_t rf = m_readBufferFill;
+    RingBuffer<float> *rb = getReadRingBuffer(0);
+    if (rb) {
+	size_t rs = rb->getReadSpace();
+	//!!! incorrect when in non-contiguous selection, see comments elsewhere
+//	std::cout << "rs = " << rs << std::endl;
+	if (rs < rf) rf -= rs;
+	else rf = 0;
+    }
+    
+    //std::cout << "m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << std::endl;
+
+    // Compute (from channel 0) how far the write buffers' read position
+    // lags behind rf, then skip every channel forward by that amount so
+    // playback resumes from the same frame after the switch.
+    size_t wf = m_writeBufferFill;
+    size_t skip = 0;
+    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
+	RingBuffer<float> *wb = getWriteRingBuffer(c);
+	if (wb) {
+	    if (c == 0) {
+		
+		size_t wrs = wb->getReadSpace();
+//		std::cout << "wrs = " << wrs << std::endl;
+
+		if (wrs < wf) wf -= wrs;
+		else wf = 0;
+//		std::cout << "wf = " << wf << std::endl;
+		
+		if (wf < rf) skip = rf - wf;
+		if (skip == 0) break;
+	    }
+
+//	    std::cout << "skipping " << skip << std::endl;
+	    wb->skip(skip);
+	}
+    }
+		    
+    m_bufferScavenger.claim(m_readBuffers);
+    m_readBuffers = m_writeBuffers;
+    m_readBufferFill = m_writeBufferFill;
+//    std::cout << "unified" << std::endl;
+}
+
+// Fill-thread main loop.  Repeatedly tops up the ring buffers via
+// fillBuffers(), unifying read/write buffer sets and scavenging retired
+// buffers, plugins and time stretchers on each pass.  Holds the source
+// mutex throughout, except while waiting on the condition variable or
+// (briefly) between consecutive fills.
+void
+AudioCallbackPlaySource::FillThread::run()
+{
+    AudioCallbackPlaySource &s(m_source);
+    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cout << "AudioCallbackPlaySourceFillThread starting" << std::endl;
+#endif
+
+    s.m_mutex.lock();
+
+    bool previouslyPlaying = s.m_playing;
+    bool work = false;	// did the previous fillBuffers() call do anything?
+
+    while (!s.m_exiting) {
+
+	s.unifyRingBuffers();
+	s.m_bufferScavenger.scavenge();
+        s.m_pluginScavenger.scavenge();
+	s.m_timeStretcherScavenger.scavenge();
+
+	if (work && s.m_playing && s.getSourceSampleRate()) {
+	    
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    std::cout << "AudioCallbackPlaySourceFillThread: not waiting" << std::endl;
+#endif
+
+	    // Last pass did useful work and playback is under way, so
+	    // loop again immediately -- releasing the mutex briefly to
+	    // give other threads a chance to take it.
+	    s.m_mutex.unlock();
+	    s.m_mutex.lock();
+
+	} else {
+	    
+	    // Otherwise wait: by default for about the time the whole
+	    // ring buffer would take to play out, or a tenth of that
+	    // while actually playing.  The condition is also signalled
+	    // externally to wake us early.
+	    float ms = 100;
+	    if (s.getSourceSampleRate() > 0) {
+		ms = float(m_ringBufferSize) / float(s.getSourceSampleRate()) * 1000.0;
+	    }
+	    
+	    if (s.m_playing) ms /= 10;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            if (!s.m_playing) std::cout << std::endl;
+	    std::cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << std::endl;
+#endif
+	    
+	    s.m_condition.wait(&s.m_mutex, size_t(ms));
+	}
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	std::cout << "AudioCallbackPlaySourceFillThread: awoken" << std::endl;
+#endif
+
+	work = false;
+
+	if (!s.getSourceSampleRate()) continue;
+
+	bool playing = s.m_playing;
+
+	if (playing && !previouslyPlaying) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+	    std::cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << std::endl;
+#endif
+	    // Playback has just (re)started: drop any stale buffered
+	    // data so we refill from the current play position.
+	    for (size_t c = 0; c < s.getTargetChannelCount(); ++c) {
+		RingBuffer<float> *rb = s.getReadRingBuffer(c);
+		if (rb) rb->reset();
+	    }
+	}
+	previouslyPlaying = playing;
+
+	work = s.fillBuffers();
+    }
+
+    s.m_mutex.unlock();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCallbackPlaySource.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,344 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_CALLBACK_PLAY_SOURCE_H_
+#define _AUDIO_CALLBACK_PLAY_SOURCE_H_
+
+#include "base/RingBuffer.h"
+#include "base/AudioPlaySource.h"
+#include "base/PropertyContainer.h"
+#include "base/Scavenger.h"
+
+#include <QObject>
+#include <QMutex>
+#include <QWaitCondition>
+
+#include "base/Thread.h"
+
+#include <samplerate.h>
+
+#include <set>
+#include <map>
+
+class Model;
+class ViewManager;
+class AudioGenerator;
+class PlayParameters;
+class PhaseVocoderTimeStretcher;
+class RealTimePluginInstance;
+
+/**
+ * AudioCallbackPlaySource manages audio data supply to callback-based
+ * audio APIs such as JACK or CoreAudio.  It maintains one ring buffer
+ * per channel, filled during playback by a non-realtime thread, and
+ * provides a method for a realtime thread to pick up the latest
+ * available sample data from these buffers.
+ */
+class AudioCallbackPlaySource : public virtual QObject,
+				public AudioPlaySource
+{
+    Q_OBJECT
+
+public:
+    AudioCallbackPlaySource(ViewManager *);
+    virtual ~AudioCallbackPlaySource();
+    
+    /**
+     * Add a data model to be played from.  The source can mix
+     * playback from a number of sources including dense and sparse
+     * models.  The models must match in sample rate, but they don't
+     * have to have identical numbers of channels.
+     */
+    virtual void addModel(Model *model);
+
+    /**
+     * Remove a model.
+     */
+    virtual void removeModel(Model *model);
+
+    /**
+     * Remove all models.  (Silence will ensue.)
+     */
+    virtual void clearModels();
+
+    /**
+     * Start making data available in the ring buffers for playback,
+     * from the given frame.  If playback is already under way, reseek
+     * to the given frame and continue.
+     */
+    virtual void play(size_t startFrame);
+
+    /**
+     * Stop playback and ensure that no more data is returned.
+     */
+    virtual void stop();
+
+    /**
+     * Return whether playback is currently supposed to be happening.
+     */
+    virtual bool isPlaying() const { return m_playing; }
+
+    /**
+     * Return the frame number that is currently expected to be coming
+     * out of the speakers.  (i.e. compensating for playback latency.)
+     */
+    virtual size_t getCurrentPlayingFrame();
+
+    /**
+     * Return the frame at which playback is expected to end (if not looping).
+     */
+    virtual size_t getPlayEndFrame() { return m_lastModelEndFrame; }
+
+    /**
+     * Set the block size of the target audio device.  This should
+     * be called by the target class.
+     */
+    void setTargetBlockSize(size_t);
+
+    /**
+     * Get the block size of the target audio device.
+     */
+    size_t getTargetBlockSize() const;
+
+    /**
+     * Set the playback latency of the target audio device, in frames
+     * at the target sample rate.  This is the difference between the
+     * frame currently "leaving the speakers" and the last frame (or
+     * highest last frame across all channels) requested via
+     * getSamples().  The default is zero.
+     */
+    void setTargetPlayLatency(size_t);
+
+    /**
+     * Get the playback latency of the target audio device.
+     */
+    size_t getTargetPlayLatency() const;
+
+    /**
+     * Specify that the target audio device has a fixed sample rate
+     * (i.e. cannot accommodate arbitrary sample rates based on the
+     * source).  If the target sets this to something other than the
+     * source sample rate, this class will resample automatically to
+     * fit.
+     */
+    void setTargetSampleRate(size_t);
+
+    /**
+     * Return the sample rate set by the target audio device (or the
+     * source sample rate if the target hasn't set one).
+     */
+    virtual size_t getTargetSampleRate() const;
+
+    /**
+     * Set the current output levels for metering (for call from the
+     * target)
+     */
+    void setOutputLevels(float left, float right);
+
+    /**
+     * Return the current (or thereabouts) output levels in the range
+     * 0.0 -> 1.0, for metering purposes.
+     */
+    virtual bool getOutputLevels(float &left, float &right);
+
+    /**
+     * Get the number of channels of audio that are in the source
+     * models.  This may safely be called from a realtime thread.
+     * Returns 0 if there is no source yet available.
+     */
+    size_t getSourceChannelCount() const;
+
+    /**
+     * Get the number of channels of audio that will be provided
+     * to the play target.  This may be more than the source channel
+     * count: for example, a mono source will provide 2 channels
+     * after pan.
+     * This may safely be called from a realtime thread.  Returns 0 if
+     * there is no source yet available.
+     */
+    size_t getTargetChannelCount() const;
+
+    /**
+     * Get the actual sample rate of the source material.  This may
+     * safely be called from a realtime thread.  Returns 0 if there is
+     * no source yet available.
+     */
+    virtual size_t getSourceSampleRate() const;
+
+    /**
+     * Get "count" samples (at the target sample rate) of the mixed
+     * audio data, in all channels.  This may safely be called from a
+     * realtime thread.
+     */
+    size_t getSourceSamples(size_t count, float **buffer);
+
+    /**
+     * Set the time stretcher factor (i.e. playback speed).  Also
+     * specify whether the time stretcher will be variable rate
+     * (sharpening transients), and whether time stretching will be
+     * carried out on data mixed down to mono for speed.
+     */
+    void setTimeStretch(float factor, bool sharpen, bool mono);
+
+    /**
+     * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is
+     * highest quality.
+     */
+    void setResampleQuality(int q);
+
+    /**
+     * Set a single real-time plugin as a processing effect for
+     * auditioning during playback.
+     *
+     * The plugin must have been initialised with
+     * getTargetChannelCount() channels and a getTargetBlockSize()
+     * sample frame processing block size.
+     *
+     * This playback source takes ownership of the plugin, which will
+     * be deleted at some point after the following call to
+     * setAuditioningPlugin (depending on real-time constraints).
+     *
+     * Pass a null pointer to remove the current auditioning plugin,
+     * if any.
+     */
+    void setAuditioningPlugin(RealTimePluginInstance *plugin);
+
+    /**
+     * Specify that only the given set of models should be played.
+     */
+    void setSoloModelSet(std::set<Model *>s);
+
+    /**
+     * Specify that all models should be played as normal (if not
+     * muted).
+     */
+    void clearSoloModelSet();
+
+signals:
+    void modelReplaced();
+
+    void playStatusChanged(bool isPlaying);
+
+    void sampleRateMismatch(size_t requested, size_t available, bool willResample);
+
+    void audioOverloadPluginDisabled();
+
+public slots:
+    void audioProcessingOverload();
+
+protected slots:
+    void selectionChanged();
+    void playLoopModeChanged();
+    void playSelectionModeChanged();
+    void playParametersChanged(PlayParameters *);
+    void preferenceChanged(PropertyContainer::PropertyName);
+    void modelChanged(size_t startFrame, size_t endFrame);
+
+protected:
+    ViewManager                     *m_viewManager;   // not deleted here
+    AudioGenerator                  *m_audioGenerator;
+
+    // Owning container of per-channel ring buffers: deletes its
+    // contents on destruction.
+    class RingBufferVector : public std::vector<RingBuffer<float> *> {
+    public:
+	virtual ~RingBufferVector() {
+	    while (!empty()) {
+		delete *begin();
+		erase(begin());
+	    }
+	}
+    };
+
+    std::set<Model *>                 m_models;
+    RingBufferVector                 *m_readBuffers;  // consumed via getReadRingBuffer
+    RingBufferVector                 *m_writeBuffers; // filled by FillThread; may be the same object as m_readBuffers
+    size_t                            m_readBufferFill;  // frame up to which read buffers are filled
+    size_t                            m_writeBufferFill; // frame up to which write buffers are filled
+    Scavenger<RingBufferVector>       m_bufferScavenger; // deferred deletion of retired buffer sets
+    size_t                            m_sourceChannelCount;
+    size_t                            m_blockSize;
+    size_t                            m_sourceSampleRate;
+    size_t                            m_targetSampleRate;
+    size_t                            m_playLatency;
+    bool                              m_playing;
+    bool                              m_exiting;     // tells FillThread to finish
+    size_t                            m_lastModelEndFrame;
+    static const size_t               m_ringBufferSize;
+    float                             m_outputLeft;  // metering level, see setOutputLevels
+    float                             m_outputRight;
+    RealTimePluginInstance           *m_auditioningPlugin;
+    bool                              m_auditioningPluginBypassed;
+    Scavenger<RealTimePluginInstance> m_pluginScavenger;
+
+    RingBuffer<float> *getWriteRingBuffer(size_t c) {
+	if (m_writeBuffers && c < m_writeBuffers->size()) {
+	    return (*m_writeBuffers)[c];
+	} else {
+	    return 0;
+	}
+    }
+
+    RingBuffer<float> *getReadRingBuffer(size_t c) {
+	RingBufferVector *rb = m_readBuffers;
+	if (rb && c < rb->size()) {
+	    return (*rb)[c];
+	} else {
+	    return 0;
+	}
+    }
+
+    void clearRingBuffers(bool haveLock = false, size_t count = 0);
+    void unifyRingBuffers();
+
+    PhaseVocoderTimeStretcher *m_timeStretcher;
+    Scavenger<PhaseVocoderTimeStretcher> m_timeStretcherScavenger;
+
+    // Called from fill thread, m_playing true, mutex held
+    // Return true if work done
+    bool fillBuffers();
+    
+    // Called from fillBuffers.  Return the number of frames written,
+    // which will be count or fewer.  Return in the frame argument the
+    // new buffered frame position (which may be earlier than the
+    // frame argument passed in, in the case of looping).
+    size_t mixModels(size_t &frame, size_t count, float **buffers);
+
+    // Called from getSourceSamples.
+    void applyAuditioningEffect(size_t count, float **buffers);
+
+    // Background thread that keeps the ring buffers topped up; see
+    // FillThread::run in the .cpp file.
+    class FillThread : public Thread
+    {
+    public:
+	FillThread(AudioCallbackPlaySource &source) :
+            Thread(Thread::NonRTThread),
+	    m_source(source) { }
+
+	virtual void run();
+
+    protected:
+	AudioCallbackPlaySource &m_source;
+    };
+
+    QMutex m_mutex;
+    QWaitCondition m_condition;
+    FillThread *m_fillThread;
+    SRC_STATE *m_converter;
+    SRC_STATE *m_crapConverter; // for use when playing very fast
+    int m_resampleQuality;      // 0 (fastest) - 2 (best), see setResampleQuality
+    void initialiseConverter();
+};
+
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCallbackPlayTarget.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,40 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioCallbackPlayTarget.h"
+#include "AudioCallbackPlaySource.h"
+
+#include <iostream>
+
+// Store the (non-owned) source and connect its modelReplaced signal to
+// our sourceModelReplaced slot, so subclasses can adjust their output
+// sample rate when the main model changes.  Gain defaults to unity.
+AudioCallbackPlayTarget::AudioCallbackPlayTarget(AudioCallbackPlaySource *source) :
+    m_source(source),
+    m_outputGain(1.0)
+{
+    if (m_source) {
+	connect(m_source, SIGNAL(modelReplaced()),
+		this, SLOT(sourceModelReplaced()));
+    }
+}
+
+// Nothing to release: m_source is not deleted here.
+AudioCallbackPlayTarget::~AudioCallbackPlayTarget()
+{
+}
+
+// Record the new playback gain (0.0 = silence, 1.0 = unmodified);
+// retrieved via getOutputGain().
+void
+AudioCallbackPlayTarget::setOutputGain(float gain)
+{
+    m_outputGain = gain;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCallbackPlayTarget.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,59 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_CALLBACK_PLAY_TARGET_H_
+#define _AUDIO_CALLBACK_PLAY_TARGET_H_
+
+#include <QObject>
+
+class AudioCallbackPlaySource;
+
+/**
+ * Abstract base class for callback-style audio playback targets.
+ * Holds a pointer to the AudioCallbackPlaySource it pulls data from
+ * and an output gain; device-specific subclasses implement isOK() and
+ * sourceModelReplaced().
+ */
+class AudioCallbackPlayTarget : public QObject
+{
+    Q_OBJECT
+
+public:
+    AudioCallbackPlayTarget(AudioCallbackPlaySource *source);
+    virtual ~AudioCallbackPlayTarget();
+
+    /** Return true if the target was initialised successfully. */
+    virtual bool isOK() const = 0;
+
+    /** Return the current playback gain (1.0 = levels unmodified). */
+    float getOutputGain() const {
+	return m_outputGain;
+    }
+
+public slots:
+    /**
+     * Set the playback gain (0.0 = silence, 1.0 = levels unmodified)
+     */
+    virtual void setOutputGain(float gain);
+
+    /**
+     * The main source model (providing the playback sample rate) has
+     * been changed.  The target should query the source's sample
+     * rate, set its output sample rate accordingly, and call back on
+     * the source's setTargetSampleRate to indicate what sample rate
+     * it succeeded in setting at the output.  If this differs from
+     * the model rate, the source will resample.
+     */
+    virtual void sourceModelReplaced() = 0;
+
+protected:
+    AudioCallbackPlaySource *m_source;  // not owned
+    float m_outputGain;
+};
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCoreAudioTarget.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,22 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifdef HAVE_COREAUDIO
+
+#include "AudioCoreAudioTarget.h"
+
+
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioCoreAudioTarget.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,64 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_CORE_AUDIO_TARGET_H_
+#define _AUDIO_CORE_AUDIO_TARGET_H_
+
+#ifdef HAVE_COREAUDIO
+
+#include <jack/jack.h>
+#include <vector>
+
+#include <CoreAudio/CoreAudio.h>
+#include <CoreAudio/CoreAudioTypes.h>
+#include <AudioUnit/AUComponent.h>
+#include <AudioUnit/AudioUnitProperties.h>
+#include <AudioUnit/AudioUnitParameters.h>
+#include <AudioUnit/AudioOutputUnit.h>
+
+#include "AudioCallbackPlayTarget.h"
+
+class AudioCallbackPlaySource;
+
+/**
+ * Playback target using the OS X CoreAudio / AudioUnit API.  Only
+ * compiled when HAVE_COREAUDIO is defined.
+ */
+class AudioCoreAudioTarget : public AudioCallbackPlayTarget
+{
+    Q_OBJECT
+
+public:
+    AudioCoreAudioTarget(AudioCallbackPlaySource *source);
+    ~AudioCoreAudioTarget();
+
+    virtual bool isOK() const;
+
+public slots:
+    virtual void sourceModelReplaced();
+
+protected:
+    // Render callback; parameter list matches the AudioUnit render-proc
+    // convention.  NOTE(review): the corresponding .cpp in this
+    // changeset is an empty stub, so the semantics of these members are
+    // unverified here.
+    OSStatus process(void *data,
+		     AudioUnitRenderActionFlags *flags,
+		     const AudioTimeStamp *timestamp,
+		     unsigned int inbus,
+		     unsigned int inframes,
+		     AudioBufferList *ioData);
+
+    int m_bufferSize;
+    int m_sampleRate;
+    int m_latency;
+};
+
+#endif /* HAVE_COREAUDIO */
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioGenerator.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,799 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioGenerator.h"
+
+#include "base/TempDirectory.h"
+#include "base/PlayParameters.h"
+#include "base/PlayParameterRepository.h"
+#include "base/Pitch.h"
+#include "base/Exceptions.h"
+
+#include "data/model/NoteModel.h"
+#include "data/model/DenseTimeValueModel.h"
+#include "data/model/SparseOneDimensionalModel.h"
+
+#include "plugin/RealTimePluginFactory.h"
+#include "plugin/RealTimePluginInstance.h"
+#include "plugin/PluginIdentifier.h"
+#include "plugin/PluginXml.h"
+#include "plugin/api/alsa/seq_event.h"
+
+#include <iostream>
+#include <math.h>
+
+#include <QDir>
+#include <QFile>
+
+const size_t
+AudioGenerator::m_pluginBlockSize = 2048;
+
+QString
+AudioGenerator::m_sampleDir = "";
+
+//#define DEBUG_AUDIO_GENERATOR 1
+
+AudioGenerator::AudioGenerator() :
+    m_sourceSampleRate(0),
+    m_targetChannelCount(1),
+    m_soloing(false)
+{
+    connect(PlayParameterRepository::getInstance(),
+            SIGNAL(playPluginIdChanged(const Model *, QString)),
+            this,
+            SLOT(playPluginIdChanged(const Model *, QString)));
+
+    connect(PlayParameterRepository::getInstance(),
+            SIGNAL(playPluginConfigurationChanged(const Model *, QString)),
+            this,
+            SLOT(playPluginConfigurationChanged(const Model *, QString)));
+}
+
+AudioGenerator::~AudioGenerator()
+{
+}
+
+bool
+AudioGenerator::canPlay(const Model *model)
+{
+    if (dynamic_cast<const DenseTimeValueModel *>(model) ||
+	dynamic_cast<const SparseOneDimensionalModel *>(model) ||
+	dynamic_cast<const NoteModel *>(model)) {
+	return true;
+    } else {
+	return false;
+    }
+}
+
+bool
+AudioGenerator::addModel(Model *model)
+{
+    if (m_sourceSampleRate == 0) {
+
+	m_sourceSampleRate = model->getSampleRate();
+
+    } else {
+
+	DenseTimeValueModel *dtvm =
+	    dynamic_cast<DenseTimeValueModel *>(model);
+
+	if (dtvm) {
+	    m_sourceSampleRate = model->getSampleRate();
+	    return true;
+	}
+    }
+
+    RealTimePluginInstance *plugin = loadPluginFor(model);
+    if (plugin) {
+        QMutexLocker locker(&m_mutex);
+        m_synthMap[model] = plugin;
+        return true;
+    }
+
+    return false;
+}
+
+void
+AudioGenerator::playPluginIdChanged(const Model *model, QString)
+{
+    if (m_synthMap.find(model) == m_synthMap.end()) return;
+    
+    RealTimePluginInstance *plugin = loadPluginFor(model);
+    if (plugin) {
+        QMutexLocker locker(&m_mutex);
+        delete m_synthMap[model];
+        m_synthMap[model] = plugin;
+    }
+}
+
+void
+AudioGenerator::playPluginConfigurationChanged(const Model *model,
+                                               QString configurationXml)
+{
+//    std::cerr << "AudioGenerator::playPluginConfigurationChanged" << std::endl;
+
+    if (m_synthMap.find(model) == m_synthMap.end()) {
+        std::cerr << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << std::endl;
+        return;
+    }
+
+    RealTimePluginInstance *plugin = m_synthMap[model];
+    if (plugin) {
+        PluginXml(plugin).setParametersFromXml(configurationXml);
+    }
+}
+
+QString
+AudioGenerator::getDefaultPlayPluginId(const Model *model)
+{
+    const SparseOneDimensionalModel *sodm =
+        dynamic_cast<const SparseOneDimensionalModel *>(model);
+    if (sodm) {
+        return QString("dssi:%1:sample_player").
+            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
+    }
+
+    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
+    if (nm) {
+        return QString("dssi:%1:sample_player").
+            arg(PluginIdentifier::BUILTIN_PLUGIN_SONAME);
+    }  
+    
+    return "";
+}
+
+QString
+AudioGenerator::getDefaultPlayPluginConfiguration(const Model *model)
+{
+    QString program = "";
+
+    const SparseOneDimensionalModel *sodm =
+        dynamic_cast<const SparseOneDimensionalModel *>(model);
+    if (sodm) {
+        program = "tap";
+    }
+
+    const NoteModel *nm = dynamic_cast<const NoteModel *>(model);
+    if (nm) {
+        program = "piano";
+    }
+
+    if (program == "") return "";
+
+    return
+        QString("<plugin configuration=\"%1\" program=\"%2\"/>")
+        .arg(XmlExportable::encodeEntities
+             (QString("sampledir=%1")
+              .arg(PluginXml::encodeConfigurationChars(getSampleDir()))))
+        .arg(XmlExportable::encodeEntities(program));
+}    
+
+QString
+AudioGenerator::getSampleDir()
+{
+    if (m_sampleDir != "") return m_sampleDir;
+
+    try {
+        m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
+    } catch (DirectoryCreationFailed f) {
+        std::cerr << "WARNING: AudioGenerator::getSampleDir: Failed to create "
+                  << "temporary sample directory" << std::endl;
+        m_sampleDir = "";
+        return "";
+    }
+
+    QDir sampleResourceDir(":/samples", "*.wav");
+
+    for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
+
+        QString fileName(sampleResourceDir[i]);
+        QFile file(sampleResourceDir.filePath(fileName));
+
+        if (!file.copy(QDir(m_sampleDir).filePath(fileName))) {
+            std::cerr << "WARNING: AudioGenerator::getSampleDir: "
+                      << "Unable to copy " << fileName.toStdString()
+                      << " into temporary directory \""
+                      << m_sampleDir.toStdString() << "\"" << std::endl;
+        }
+    }
+
+    return m_sampleDir;
+}
+
+void
+AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
+{
+    plugin->configure("sampledir", getSampleDir().toStdString());
+} 
+
+RealTimePluginInstance *
+AudioGenerator::loadPluginFor(const Model *model)
+{
+    QString pluginId, configurationXml;
+
+    PlayParameters *parameters =
+	PlayParameterRepository::getInstance()->getPlayParameters(model);
+    if (parameters) {
+        pluginId = parameters->getPlayPluginId();
+        configurationXml = parameters->getPlayPluginConfiguration();
+    }
+
+    if (pluginId == "") {
+        pluginId = getDefaultPlayPluginId(model);
+        configurationXml = getDefaultPlayPluginConfiguration(model);
+    }
+
+    if (pluginId == "") return 0;
+
+    RealTimePluginInstance *plugin = loadPlugin(pluginId, "");
+    if (!plugin) return 0;
+
+    if (configurationXml != "") {
+        PluginXml(plugin).setParametersFromXml(configurationXml);
+    }
+
+    if (parameters) {
+        parameters->setPlayPluginId(pluginId);
+        parameters->setPlayPluginConfiguration(configurationXml);
+    }
+
+    return plugin;
+}
+
+RealTimePluginInstance *
+AudioGenerator::loadPlugin(QString pluginId, QString program)
+{
+    RealTimePluginFactory *factory =
+	RealTimePluginFactory::instanceFor(pluginId);
+    
+    if (!factory) {
+	std::cerr << "Failed to get plugin factory" << std::endl;
+	return false;
+    }
+	
+    RealTimePluginInstance *instance =
+	factory->instantiatePlugin
+	(pluginId, 0, 0, m_sourceSampleRate, m_pluginBlockSize, m_targetChannelCount);
+
+    if (!instance) {
+	std::cerr << "Failed to instantiate plugin " << pluginId.toStdString() << std::endl;
+        return 0;
+    }
+
+    setSampleDir(instance);
+
+    for (unsigned int i = 0; i < instance->getParameterCount(); ++i) {
+        instance->setParameterValue(i, instance->getParameterDefault(i));
+    }
+    std::string defaultProgram = instance->getProgram(0, 0);
+    if (defaultProgram != "") {
+//        std::cerr << "first selecting default program " << defaultProgram << std::endl;
+        instance->selectProgram(defaultProgram);
+    }
+    if (program != "") {
+//        std::cerr << "now selecting desired program " << program.toStdString() << std::endl;
+        instance->selectProgram(program.toStdString());
+    }
+    instance->setIdealChannelCount(m_targetChannelCount); // reset!
+
+    return instance;
+}
+
+void
+AudioGenerator::removeModel(Model *model)
+{
+    SparseOneDimensionalModel *sodm =
+	dynamic_cast<SparseOneDimensionalModel *>(model);
+    if (!sodm) return; // nothing to do
+
+    QMutexLocker locker(&m_mutex);
+
+    if (m_synthMap.find(sodm) == m_synthMap.end()) return;
+
+    RealTimePluginInstance *instance = m_synthMap[sodm];
+    m_synthMap.erase(sodm);
+    delete instance;
+}
+
+void
+AudioGenerator::clearModels()
+{
+    QMutexLocker locker(&m_mutex);
+    while (!m_synthMap.empty()) {
+	RealTimePluginInstance *instance = m_synthMap.begin()->second;
+	m_synthMap.erase(m_synthMap.begin());
+	delete instance;
+    }
+}    
+
+void
+AudioGenerator::reset()
+{
+    QMutexLocker locker(&m_mutex);
+    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
+	if (i->second) {
+	    i->second->silence();
+	    i->second->discardEvents();
+	}
+    }
+
+    m_noteOffs.clear();
+}
+
+void
+AudioGenerator::setTargetChannelCount(size_t targetChannelCount)
+{
+    if (m_targetChannelCount == targetChannelCount) return;
+
+//    std::cerr << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << std::endl;
+
+    QMutexLocker locker(&m_mutex);
+    m_targetChannelCount = targetChannelCount;
+
+    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
+	if (i->second) i->second->setIdealChannelCount(targetChannelCount);
+    }
+}
+
+size_t
+AudioGenerator::getBlockSize() const
+{
+    return m_pluginBlockSize;
+}
+
+void
+AudioGenerator::setSoloModelSet(std::set<Model *> s)
+{
+    QMutexLocker locker(&m_mutex);
+
+    m_soloModelSet = s;
+    m_soloing = true;
+}
+
+void
+AudioGenerator::clearSoloModelSet()
+{
+    QMutexLocker locker(&m_mutex);
+
+    m_soloModelSet.clear();
+    m_soloing = false;
+}
+
+size_t
+AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount,
+			 float **buffer, size_t fadeIn, size_t fadeOut)
+{
+    if (m_sourceSampleRate == 0) {
+	std::cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << std::endl;
+	return frameCount;
+    }
+
+    QMutexLocker locker(&m_mutex);
+
+    PlayParameters *parameters =
+	PlayParameterRepository::getInstance()->getPlayParameters(model);
+    if (!parameters) return frameCount;
+
+    bool playing = !parameters->isPlayMuted();
+    if (!playing) {
+#ifdef DEBUG_AUDIO_GENERATOR
+        std::cout << "AudioGenerator::mixModel(" << model << "): muted" << std::endl;
+#endif
+        return frameCount;
+    }
+
+    if (m_soloing) {
+        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
+#ifdef DEBUG_AUDIO_GENERATOR
+            std::cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << std::endl;
+#endif
+            return frameCount;
+        }
+    }
+
+    float gain = parameters->getPlayGain();
+    float pan = parameters->getPlayPan();
+
+    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
+    if (dtvm) {
+	return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
+				      buffer, gain, pan, fadeIn, fadeOut);
+    }
+
+    SparseOneDimensionalModel *sodm = dynamic_cast<SparseOneDimensionalModel *>
+	(model);
+    if (sodm) {
+	return mixSparseOneDimensionalModel(sodm, startFrame, frameCount,
+					    buffer, gain, pan, fadeIn, fadeOut);
+    }
+
+    NoteModel *nm = dynamic_cast<NoteModel *>(model);
+    if (nm) {
+	return mixNoteModel(nm, startFrame, frameCount,
+			    buffer, gain, pan, fadeIn, fadeOut);
+    }
+
+    return frameCount;
+}
+
+size_t
+AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
+				       size_t startFrame, size_t frames,
+				       float **buffer, float gain, float pan,
+				       size_t fadeIn, size_t fadeOut)
+{
+    static float *channelBuffer = 0;
+    static size_t channelBufSiz = 0;
+
+    size_t totalFrames = frames + fadeIn/2 + fadeOut/2;
+
+    if (channelBufSiz < totalFrames) {
+	delete[] channelBuffer;
+	channelBuffer = new float[totalFrames];
+	channelBufSiz = totalFrames;
+    }
+    
+    size_t got = 0;
+    size_t prevChannel = 999;
+
+    for (size_t c = 0; c < m_targetChannelCount; ++c) {
+
+	size_t sourceChannel = (c % dtvm->getChannelCount());
+
+//	std::cerr << "mixing channel " << c << " from source channel " << sourceChannel << std::endl;
+
+	float channelGain = gain;
+	if (pan != 0.0) {
+	    if (c == 0) {
+		if (pan > 0.0) channelGain *= 1.0 - pan;
+	    } else {
+		if (pan < 0.0) channelGain *= pan + 1.0;
+	    }
+	}
+
+	if (prevChannel != sourceChannel) {
+	    if (startFrame >= fadeIn/2) {
+		got = dtvm->getData
+		    (sourceChannel,
+		     startFrame - fadeIn/2,
+                     frames + fadeOut/2 + fadeIn/2,
+		     channelBuffer);
+	    } else {
+		size_t missing = fadeIn/2 - startFrame;
+		got = dtvm->getData
+		    (sourceChannel,
+		     startFrame,
+                     frames + fadeOut/2,
+		     channelBuffer + missing);
+	    }	    
+	}
+	prevChannel = sourceChannel;
+
+	for (size_t i = 0; i < fadeIn/2; ++i) {
+	    float *back = buffer[c];
+	    back -= fadeIn/2;
+	    back[i] += (channelGain * channelBuffer[i] * i) / fadeIn;
+	}
+
+	for (size_t i = 0; i < frames + fadeOut/2; ++i) {
+	    float mult = channelGain;
+	    if (i < fadeIn/2) {
+		mult = (mult * i) / fadeIn;
+	    }
+	    if (i > frames - fadeOut/2) {
+		mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
+	    }
+	    buffer[c][i] += mult * channelBuffer[i];
+	}
+    }
+
+    return got;
+}
+  
+size_t
+AudioGenerator::mixSparseOneDimensionalModel(SparseOneDimensionalModel *sodm,
+					     size_t startFrame, size_t frames,
+					     float **buffer, float gain, float pan,
+					     size_t /* fadeIn */,
+					     size_t /* fadeOut */)
+{
+    RealTimePluginInstance *plugin = m_synthMap[sodm];
+    if (!plugin) return 0;
+
+    size_t latency = plugin->getLatency();
+    size_t blocks = frames / m_pluginBlockSize;
+    
+    //!!! hang on -- the fact that the audio callback play source's
+    //buffer is a multiple of the plugin's buffer size doesn't mean
+    //that we always get called for a multiple of it here (because it
+    //also depends on the JACK block size).  how should we ensure that
+    //all models write the same amount in to the mix, and that we
+    //always have a multiple of the plugin buffer size?  I guess this
+    //class has to be queryable for the plugin buffer size & the
+    //callback play source has to use that as a multiple for all the
+    //calls to mixModel
+
+    size_t got = blocks * m_pluginBlockSize;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    std::cout << "mixModel [sparse]: frames " << frames
+	      << ", blocks " << blocks << std::endl;
+#endif
+
+    snd_seq_event_t onEv;
+    onEv.type = SND_SEQ_EVENT_NOTEON;
+    onEv.data.note.channel = 0;
+    onEv.data.note.note = 64;
+    onEv.data.note.velocity = 100;
+
+    snd_seq_event_t offEv;
+    offEv.type = SND_SEQ_EVENT_NOTEOFF;
+    offEv.data.note.channel = 0;
+    offEv.data.note.velocity = 0;
+    
+    NoteOffSet &noteOffs = m_noteOffs[sodm];
+
+    for (size_t i = 0; i < blocks; ++i) {
+
+	size_t reqStart = startFrame + i * m_pluginBlockSize;
+
+	SparseOneDimensionalModel::PointList points =
+	    sodm->getPoints(reqStart + latency,
+			    reqStart + latency + m_pluginBlockSize);
+
+        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
+	    (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);
+
+	for (SparseOneDimensionalModel::PointList::iterator pli =
+		 points.begin(); pli != points.end(); ++pli) {
+
+	    size_t pliFrame = pli->frame;
+
+	    if (pliFrame >= latency) pliFrame -= latency;
+
+	    if (pliFrame < reqStart ||
+		pliFrame >= reqStart + m_pluginBlockSize) continue;
+
+	    while (noteOffs.begin() != noteOffs.end() &&
+		   noteOffs.begin()->frame <= pliFrame) {
+
+                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		    (noteOffs.begin()->frame, m_sourceSampleRate);
+
+		offEv.data.note.note = noteOffs.begin()->pitch;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+		std::cerr << "mixModel [sparse]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
+#endif
+
+		plugin->sendEvent(eventTime, &offEv);
+		noteOffs.erase(noteOffs.begin());
+	    }
+
+            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		(pliFrame, m_sourceSampleRate);
+	    
+	    plugin->sendEvent(eventTime, &onEv);
+
+#ifdef DEBUG_AUDIO_GENERATOR
+	    std::cout << "mixModel [sparse]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
+#endif
+	    
+	    size_t duration = 7000; // frames [for now]
+	    NoteOff noff;
+	    noff.pitch = onEv.data.note.note;
+	    noff.frame = pliFrame + duration;
+	    noteOffs.insert(noff);
+	}
+
+	while (noteOffs.begin() != noteOffs.end() &&
+	       noteOffs.begin()->frame <=
+	       startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {
+
+            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		(noteOffs.begin()->frame, m_sourceSampleRate);
+
+	    offEv.data.note.note = noteOffs.begin()->pitch;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+		std::cerr << "mixModel [sparse]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
+#endif
+
+	    plugin->sendEvent(eventTime, &offEv);
+	    noteOffs.erase(noteOffs.begin());
+	}
+	
+	plugin->run(blockTime);
+	float **outs = plugin->getAudioOutputBuffers();
+
+	for (size_t c = 0; c < m_targetChannelCount; ++c) {
+#ifdef DEBUG_AUDIO_GENERATOR
+	    std::cout << "mixModel [sparse]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
+#endif
+
+	    size_t sourceChannel = (c % plugin->getAudioOutputCount());
+
+	    float channelGain = gain;
+	    if (pan != 0.0) {
+		if (c == 0) {
+		    if (pan > 0.0) channelGain *= 1.0 - pan;
+		} else {
+		    if (pan < 0.0) channelGain *= pan + 1.0;
+		}
+	    }
+
+	    for (size_t j = 0; j < m_pluginBlockSize; ++j) {
+		buffer[c][i * m_pluginBlockSize + j] +=
+		    channelGain * outs[sourceChannel][j];
+	    }
+	}
+    }
+
+    return got;
+}
+
+    
+//!!! mucho duplication with above -- refactor
+size_t
+AudioGenerator::mixNoteModel(NoteModel *nm,
+			     size_t startFrame, size_t frames,
+			     float **buffer, float gain, float pan,
+			     size_t /* fadeIn */,
+			     size_t /* fadeOut */)
+{
+    RealTimePluginInstance *plugin = m_synthMap[nm];
+    if (!plugin) return 0;
+
+    size_t latency = plugin->getLatency();
+    size_t blocks = frames / m_pluginBlockSize;
+    
+    //!!! hang on -- the fact that the audio callback play source's
+    //buffer is a multiple of the plugin's buffer size doesn't mean
+    //that we always get called for a multiple of it here (because it
+    //also depends on the JACK block size).  how should we ensure that
+    //all models write the same amount in to the mix, and that we
+    //always have a multiple of the plugin buffer size?  I guess this
+    //class has to be queryable for the plugin buffer size & the
+    //callback play source has to use that as a multiple for all the
+    //calls to mixModel
+
+    size_t got = blocks * m_pluginBlockSize;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    std::cout << "mixModel [note]: frames " << frames
+	      << ", blocks " << blocks << std::endl;
+#endif
+
+    snd_seq_event_t onEv;
+    onEv.type = SND_SEQ_EVENT_NOTEON;
+    onEv.data.note.channel = 0;
+    onEv.data.note.note = 64;
+    onEv.data.note.velocity = 100;
+
+    snd_seq_event_t offEv;
+    offEv.type = SND_SEQ_EVENT_NOTEOFF;
+    offEv.data.note.channel = 0;
+    offEv.data.note.velocity = 0;
+    
+    NoteOffSet &noteOffs = m_noteOffs[nm];
+
+    for (size_t i = 0; i < blocks; ++i) {
+
+	size_t reqStart = startFrame + i * m_pluginBlockSize;
+
+	NoteModel::PointList points =
+	    nm->getPoints(reqStart + latency,
+			    reqStart + latency + m_pluginBlockSize);
+
+        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
+	    (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);
+
+	for (NoteModel::PointList::iterator pli =
+		 points.begin(); pli != points.end(); ++pli) {
+
+	    size_t pliFrame = pli->frame;
+
+	    if (pliFrame >= latency) pliFrame -= latency;
+
+	    if (pliFrame < reqStart ||
+		pliFrame >= reqStart + m_pluginBlockSize) continue;
+
+	    while (noteOffs.begin() != noteOffs.end() &&
+		   noteOffs.begin()->frame <= pliFrame) {
+
+                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		    (noteOffs.begin()->frame, m_sourceSampleRate);
+
+		offEv.data.note.note = noteOffs.begin()->pitch;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+		std::cerr << "mixModel [note]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
+#endif
+
+		plugin->sendEvent(eventTime, &offEv);
+		noteOffs.erase(noteOffs.begin());
+	    }
+
+            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		(pliFrame, m_sourceSampleRate);
+	    
+            if (nm->getScaleUnits() == "Hz") {
+                onEv.data.note.note = Pitch::getPitchForFrequency(pli->value);
+            } else {
+                onEv.data.note.note = lrintf(pli->value);
+            }
+
+	    plugin->sendEvent(eventTime, &onEv);
+
+#ifdef DEBUG_AUDIO_GENERATOR
+	    std::cout << "mixModel [note]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
+#endif
+	    
+	    size_t duration = pli->duration;
+            if (duration == 0 || duration == 1) {
+                duration = m_sourceSampleRate / 20;
+            }
+	    NoteOff noff;
+	    noff.pitch = onEv.data.note.note;
+	    noff.frame = pliFrame + duration;
+	    noteOffs.insert(noff);
+	}
+
+	while (noteOffs.begin() != noteOffs.end() &&
+	       noteOffs.begin()->frame <=
+	       startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {
+
+            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		(noteOffs.begin()->frame, m_sourceSampleRate);
+
+	    offEv.data.note.note = noteOffs.begin()->pitch;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+		std::cerr << "mixModel [note]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << std::endl;
+#endif
+
+	    plugin->sendEvent(eventTime, &offEv);
+	    noteOffs.erase(noteOffs.begin());
+	}
+	
+	plugin->run(blockTime);
+	float **outs = plugin->getAudioOutputBuffers();
+
+	for (size_t c = 0; c < m_targetChannelCount; ++c) {
+#ifdef DEBUG_AUDIO_GENERATOR
+	    std::cout << "mixModel [note]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
+#endif
+
+	    size_t sourceChannel = (c % plugin->getAudioOutputCount());
+
+	    float channelGain = gain;
+	    if (pan != 0.0) {
+		if (c == 0) {
+		    if (pan > 0.0) channelGain *= 1.0 - pan;
+		} else {
+		    if (pan < 0.0) channelGain *= pan + 1.0;
+		}
+	    }
+
+	    for (size_t j = 0; j < m_pluginBlockSize; ++j) {
+		buffer[c][i * m_pluginBlockSize + j] += 
+		    channelGain * outs[sourceChannel][j];
+	    }
+	}
+    }
+
+    return got;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioGenerator.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,158 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_GENERATOR_H_
+#define _AUDIO_GENERATOR_H_
+
+class Model;
+class NoteModel;
+class DenseTimeValueModel;
+class SparseOneDimensionalModel;
+class RealTimePluginInstance;
+
+#include <QObject>
+#include <QMutex>
+
+#include <set>
+#include <map>
+
+class AudioGenerator : public QObject
+{
+    Q_OBJECT
+
+public:
+    AudioGenerator();
+    virtual ~AudioGenerator();
+
+    /**
+     * Return true if the given model is of a type that we generally
+     * know how to play.  This doesn't guarantee that a specific
+     * AudioGenerator will actually produce sounds for it (for
+     * example, it may turn out that a vital plugin is missing).
+     */
+    static bool canPlay(const Model *model);
+
+    static QString getDefaultPlayPluginId(const Model *model);
+    static QString getDefaultPlayPluginConfiguration(const Model *model);
+
+    /**
+     * Add a data model to be played from and initialise any necessary
+     * audio generation code.  Returns true if the model will be
+     * played.  (The return value test here is stricter than that for
+     * canPlay, above.)  The model will be added regardless of the
+     * return value.
+     */
+    virtual bool addModel(Model *model);
+
+    /**
+     * Remove a model.
+     */
+    virtual void removeModel(Model *model);
+
+    /**
+     * Remove all models.
+     */
+    virtual void clearModels();
+
+    /**
+     * Reset playback, clearing plugins and the like.
+     */
+    virtual void reset();
+
+    /**
+     * Set the target channel count.  The buffer parameter to mixModel
+     * must always point to at least this number of arrays.
+     */
+    virtual void setTargetChannelCount(size_t channelCount);
+
+    /**
+     * Return the internal processing block size.  The frameCount
+     * argument to all mixModel calls must be a multiple of this
+     * value.
+     */
+    virtual size_t getBlockSize() const;
+
+    /**
+     * Mix a single model into an output buffer.
+     */
+    virtual size_t mixModel(Model *model, size_t startFrame, size_t frameCount,
+			    float **buffer, size_t fadeIn = 0, size_t fadeOut = 0);
+
+    /**
+     * Specify that only the given set of models should be played.
+     */
+    virtual void setSoloModelSet(std::set<Model *>s);
+
+    /**
+     * Specify that all models should be played as normal (if not
+     * muted).
+     */
+    virtual void clearSoloModelSet();
+
+protected slots:
+    void playPluginIdChanged(const Model *, QString);
+    void playPluginConfigurationChanged(const Model *, QString);
+
+protected:
+    size_t       m_sourceSampleRate;
+    size_t       m_targetChannelCount;
+
+    bool m_soloing;
+    std::set<Model *> m_soloModelSet;
+
+    struct NoteOff {
+
+	int pitch;
+	size_t frame;
+
+	struct Comparator {
+	    bool operator()(const NoteOff &n1, const NoteOff &n2) const {
+		return n1.frame < n2.frame;
+	    }
+	};
+    };
+
+    typedef std::map<const Model *, RealTimePluginInstance *> PluginMap;
+
+    typedef std::set<NoteOff, NoteOff::Comparator> NoteOffSet;
+    typedef std::map<const Model *, NoteOffSet> NoteOffMap;
+
+    QMutex m_mutex;
+    PluginMap m_synthMap;
+    NoteOffMap m_noteOffs;
+    static QString m_sampleDir;
+
+    virtual RealTimePluginInstance *loadPluginFor(const Model *model);
+    virtual RealTimePluginInstance *loadPlugin(QString id, QString program);
+    static QString getSampleDir();
+    static void setSampleDir(RealTimePluginInstance *plugin);
+
+    virtual size_t mixDenseTimeValueModel
+    (DenseTimeValueModel *model, size_t startFrame, size_t frameCount,
+     float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
+
+    virtual size_t mixSparseOneDimensionalModel
+    (SparseOneDimensionalModel *model, size_t startFrame, size_t frameCount,
+     float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
+
+    virtual size_t mixNoteModel
+    (NoteModel *model, size_t startFrame, size_t frameCount,
+     float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
+
+    static const size_t m_pluginBlockSize;
+};
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioJACKTarget.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,402 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifdef HAVE_JACK
+
+#include "AudioJACKTarget.h"
+#include "AudioCallbackPlaySource.h"
+
+#include <cstdlib>
+#include <iostream>
+#include <cmath>
+
+//#define DEBUG_AUDIO_JACK_TARGET 1
+
+#ifdef BUILD_STATIC
+#ifdef Q_OS_LINUX
+
+// Some lunacy to enable JACK support in static builds.  JACK isn't
+// supposed to be linked statically, because it depends on a
+// consistent shared memory layout between client library and daemon,
+// so it's very fragile in the face of version mismatches.
+//
+// Therefore for static builds on Linux we avoid linking against JACK
+// at all during the build, instead using dlopen and runtime symbol
+// lookup to switch on JACK support at runtime.  The following big
+// mess (down to the #endifs) is the code that implements this.
+
+// Resolve a JACK symbol at runtime via dlopen/dlsym.  The library
+// handle and a cache of previously looked-up symbols live in
+// function-local statics, so the dlopen cost is paid only once.
+// Returns 0 if the library cannot be loaded or the symbol is absent.
+// NOTE(review): the cache map is keyed on const char * and so compares
+// pointer identity, not string contents; this works because every
+// caller passes a string literal (distinct equal-text literals would
+// merely miss the cache).  Also, ::dlopen/std::map are used here but
+// no <dlfcn.h>/<map> include is visible in this file -- presumably
+// pulled in indirectly; confirm.
+static void *symbol(const char *name)
+{
+    static bool attempted = false;
+    static void *library = 0;
+    static std::map<const char *, void *> symbols;
+    if (symbols.find(name) != symbols.end()) return symbols[name];
+    if (!library) {
+        if (!attempted) {
+            // Try ABI-versioned sonames first, then the bare .so
+            library = ::dlopen("libjack.so.1", RTLD_NOW);
+            if (!library) library = ::dlopen("libjack.so.0", RTLD_NOW);
+            if (!library) library = ::dlopen("libjack.so", RTLD_NOW);
+            if (!library) {
+                std::cerr << "WARNING: AudioJACKTarget: Failed to load JACK library: "
+                          << ::dlerror() << " (tried .so, .so.0, .so.1)"
+                          << std::endl;
+            }
+            attempted = true;
+        }
+        if (!library) return 0;
+    }
+    void *symbol = ::dlsym(library, name);
+    if (!symbol) {
+        std::cerr << "WARNING: AudioJACKTarget: Failed to locate symbol "
+                  << name << ": " << ::dlerror() << std::endl;
+    }
+    // Failures are cached too (as 0), so we only warn once per symbol
+    symbols[name] = symbol;
+    return symbol;
+}
+
+// The following wrappers mirror the JACK entry points used below,
+// resolving each symbol lazily via symbol().  Wrappers returning an
+// int error code yield 1 (failure) when the symbol is unresolved;
+// pointer-returning wrappers yield 0.
+
+// Runtime-resolved stand-in for jack_set_process_callback()
+static int dynamic_jack_set_process_callback(jack_client_t *client,
+                                             JackProcessCallback process_callback,
+                                             void *arg)
+{
+    typedef int (*func)(jack_client_t *client,
+                        JackProcessCallback process_callback,
+                        void *arg);
+    void *s = symbol("jack_set_process_callback");
+    if (!s) return 1;
+    func f = (func)s;
+    return f(client, process_callback, arg);
+}
+
+// Runtime-resolved stand-in for jack_set_xrun_callback()
+static int dynamic_jack_set_xrun_callback(jack_client_t *client,
+                                          JackXRunCallback xrun_callback,
+                                          void *arg)
+{
+    typedef int (*func)(jack_client_t *client,
+                        JackXRunCallback xrun_callback,
+                        void *arg);
+    void *s = symbol("jack_set_xrun_callback");
+    if (!s) return 1;
+    func f = (func)s;
+    return f(client, xrun_callback, arg);
+}
+
+// Runtime-resolved stand-in for jack_get_ports(); returns 0 on failure
+static const char **dynamic_jack_get_ports(jack_client_t *client, 
+                                           const char *port_name_pattern, 
+                                           const char *type_name_pattern, 
+                                           unsigned long flags)
+{
+    typedef const char **(*func)(jack_client_t *client, 
+                                 const char *port_name_pattern, 
+                                 const char *type_name_pattern, 
+                                 unsigned long flags);
+    void *s = symbol("jack_get_ports");
+    if (!s) return 0;
+    func f = (func)s;
+    return f(client, port_name_pattern, type_name_pattern, flags);
+}
+
+// Runtime-resolved stand-in for jack_port_register(); returns 0 on failure
+static jack_port_t *dynamic_jack_port_register(jack_client_t *client,
+                                               const char *port_name,
+                                               const char *port_type,
+                                               unsigned long flags,
+                                               unsigned long buffer_size)
+{
+    typedef jack_port_t *(*func)(jack_client_t *client,
+                                 const char *port_name,
+                                 const char *port_type,
+                                 unsigned long flags,
+                                 unsigned long buffer_size);
+    void *s = symbol("jack_port_register");
+    if (!s) return 0;
+    func f = (func)s;
+    return f(client, port_name, port_type, flags, buffer_size);
+}
+
+// Runtime-resolved stand-in for jack_connect()
+static int dynamic_jack_connect(jack_client_t *client,
+                                const char *source,
+                                const char *dest)
+{
+    typedef int (*func)(jack_client_t *client,
+                        const char *source,
+                        const char *dest);
+    void *s = symbol("jack_connect");
+    if (!s) return 1;
+    func f = (func)s;
+    return f(client, source, dest);
+}
+
+// Runtime-resolved stand-in for jack_port_get_buffer(); returns 0 on failure
+static void *dynamic_jack_port_get_buffer(jack_port_t *port,
+                                          jack_nframes_t sz)
+{
+    typedef void *(*func)(jack_port_t *, jack_nframes_t);
+    void *s = symbol("jack_port_get_buffer");
+    if (!s) return 0;
+    func f = (func)s;
+    return f(port, sz);
+}
+
+// Runtime-resolved stand-in for jack_port_unregister().  Like the
+// other int-returning wrappers above, report failure (non-zero) when
+// the symbol cannot be resolved, rather than pretending success as
+// the previous "return 0" did.
+static int dynamic_jack_port_unregister(jack_client_t *client,
+                                        jack_port_t *port)
+{
+    typedef int(*func)(jack_client_t *, jack_port_t *);
+    void *s = symbol("jack_port_unregister");
+    if (!s) return 1;
+    func f = (func)s;
+    return f(client, port);
+}
+
+// dynamic1(rv, name, argtype, failval) generates a dynamic_<name>
+// wrapper for a one-argument JACK function returning rv, yielding
+// failval when the symbol cannot be resolved.
+#define dynamic1(rv, name, argtype, failval) \
+    static rv dynamic_##name(argtype arg) { \
+        typedef rv (*func) (argtype); \
+        void *s = symbol(#name); \
+        if (!s) return failval; \
+        func f = (func) s; \
+        return f(arg); \
+    }
+
+dynamic1(jack_client_t *, jack_client_new, const char *, 0);
+dynamic1(jack_nframes_t, jack_get_buffer_size, jack_client_t *, 0);
+dynamic1(jack_nframes_t, jack_get_sample_rate, jack_client_t *, 0);
+dynamic1(int, jack_activate, jack_client_t *, 1);
+dynamic1(int, jack_deactivate, jack_client_t *, 1);
+dynamic1(int, jack_client_close, jack_client_t *, 1);
+dynamic1(jack_nframes_t, jack_port_get_latency, jack_port_t *, 0);
+dynamic1(const char *, jack_port_name, const jack_port_t *, 0);
+
+// Redirect every JACK call in the remainder of this file through the
+// dynamic wrappers defined above.
+#define jack_client_new dynamic_jack_client_new
+#define jack_get_buffer_size dynamic_jack_get_buffer_size
+#define jack_get_sample_rate dynamic_jack_get_sample_rate
+#define jack_set_process_callback dynamic_jack_set_process_callback
+#define jack_set_xrun_callback dynamic_jack_set_xrun_callback
+#define jack_activate dynamic_jack_activate
+#define jack_deactivate dynamic_jack_deactivate
+#define jack_client_close dynamic_jack_client_close
+#define jack_get_ports dynamic_jack_get_ports
+#define jack_port_register dynamic_jack_port_register
+#define jack_port_unregister dynamic_jack_port_unregister
+#define jack_port_get_latency dynamic_jack_port_get_latency
+#define jack_port_name dynamic_jack_port_name
+#define jack_connect dynamic_jack_connect
+#define jack_port_get_buffer dynamic_jack_port_get_buffer
+
+#endif
+#endif
+
+// Connect to the JACK server, register callbacks, and activate the
+// client.  On any failure m_client stays 0 and isOK() reports false.
+AudioJACKTarget::AudioJACKTarget(AudioCallbackPlaySource *source) :
+    AudioCallbackPlayTarget(source),
+    m_client(0),
+    m_bufferSize(0),
+    m_sampleRate(0)
+{
+    char name[100];
+    strcpy(name, "Sonic Visualiser");
+    m_client = jack_client_new(name);
+
+    if (!m_client) {
+	// The plain name may already be taken by another instance:
+	// retry with the pid appended to make it unique
+	sprintf(name, "Sonic Visualiser (%d)", (int)getpid());
+	m_client = jack_client_new(name);
+	if (!m_client) {
+	    std::cerr
+		<< "ERROR: AudioJACKTarget: Failed to connect to JACK server"
+		<< std::endl;
+	}
+    }
+
+    if (!m_client) return;
+
+    m_bufferSize = jack_get_buffer_size(m_client);
+    m_sampleRate = jack_get_sample_rate(m_client);
+
+    // Register callbacks before activating, so the first process
+    // cycle already reaches us
+    jack_set_xrun_callback(m_client, xrunStatic, this);
+    jack_set_process_callback(m_client, processStatic, this);
+
+    if (jack_activate(m_client)) {
+	std::cerr << "ERROR: AudioJACKTarget: Failed to activate JACK client"
+		  << std::endl;
+    }
+
+    if (m_source) {
+	// Create ports and push block size / sample rate to the source
+	sourceModelReplaced();
+    }
+}
+
+// Tear down the JACK client.  Deactivate first so process() can no
+// longer be invoked on a dying object, then close the connection.
+AudioJACKTarget::~AudioJACKTarget()
+{
+    std::cerr << "AudioJACKTarget::~AudioJACKTarget()" << std::endl;
+    if (m_client) {
+	jack_deactivate(m_client);
+	jack_client_close(m_client);
+    }
+    std::cerr << "AudioJACKTarget::~AudioJACKTarget() done" << std::endl;
+}
+
+/**
+ * Report whether the JACK connection was established successfully.
+ * The client handle is non-null only after jack_client_new() has
+ * succeeded in the constructor.
+ */
+bool
+AudioJACKTarget::isOK() const
+{
+    return m_client != 0;
+}
+
+// Static trampoline registered with JACK: arg carries the target
+// instance, so forward the cycle to its process() method.
+int
+AudioJACKTarget::processStatic(jack_nframes_t nframes, void *arg)
+{
+    AudioJACKTarget *target = static_cast<AudioJACKTarget *>(arg);
+    return target->process(nframes);
+}
+
+// Static trampoline registered with JACK for xrun notifications;
+// arg carries the target instance.
+int
+AudioJACKTarget::xrunStatic(void *arg)
+{
+    AudioJACKTarget *target = static_cast<AudioJACKTarget *>(arg);
+    return target->xrun();
+}
+
+// Re-negotiate our output ports when the source model changes: push
+// the block size and sample rate to the source, then grow or shrink
+// the port list to match the source channel count, auto-connecting
+// new ports to physical inputs where available.
+//
+// Fixes over the previous revision: jack_get_ports() may return NULL
+// (no matches, or unresolved symbol in the dlopen'd static build), so
+// guard before walking the array and free it afterwards (it is
+// allocated by the library and owned by the caller); never call
+// jack_port_name() on a port that failed to register; and cast the
+// size_t port index to int to match the %d format specifier.
+void
+AudioJACKTarget::sourceModelReplaced()
+{
+    m_mutex.lock();
+
+    m_source->setTargetBlockSize(m_bufferSize);
+    m_source->setTargetSampleRate(m_sampleRate);
+
+    size_t channels = m_source->getSourceChannelCount();
+
+    // Because we offer pan, we always want at least 2 channels
+    if (channels < 2) channels = 2;
+
+    if (channels == m_outputs.size() || !m_client) {
+	m_mutex.unlock();
+	return;
+    }
+
+    const char **ports =
+	jack_get_ports(m_client, NULL, NULL,
+		       JackPortIsPhysical | JackPortIsInput);
+    size_t physicalPortCount = 0;
+    while (ports && ports[physicalPortCount]) ++physicalPortCount;
+
+#ifdef DEBUG_AUDIO_JACK_TARGET    
+    std::cerr << "AudioJACKTarget::sourceModelReplaced: have " << channels << " channels and " << physicalPortCount << " physical ports" << std::endl;
+#endif
+
+    while (m_outputs.size() < channels) {
+	
+	char name[20];
+	jack_port_t *port;
+
+	sprintf(name, "out %d", int(m_outputs.size() + 1));
+
+	port = jack_port_register(m_client,
+				  name,
+				  JACK_DEFAULT_AUDIO_TYPE,
+				  JackPortIsOutput,
+				  0);
+
+	if (!port) {
+	    std::cerr
+		<< "ERROR: AudioJACKTarget: Failed to create JACK output port "
+		<< m_outputs.size() << std::endl;
+	} else {
+	    m_source->setTargetPlayLatency(jack_port_get_latency(port));
+	}
+
+	// Only attempt auto-connection if registration succeeded and
+	// a physical input remains to connect to
+	if (port && m_outputs.size() < physicalPortCount) {
+	    jack_connect(m_client, jack_port_name(port), ports[m_outputs.size()]);
+	}
+
+	m_outputs.push_back(port);
+    }
+
+    while (m_outputs.size() > channels) {
+	std::vector<jack_port_t *>::iterator itr = m_outputs.end();
+	--itr;
+	jack_port_t *port = *itr;
+	if (port) jack_port_unregister(m_client, port);
+	m_outputs.erase(itr);
+    }
+
+    // The port-name array is allocated by JACK but owned by us
+    if (ports) std::free(ports);
+
+    m_mutex.unlock();
+}
+
+// Realtime JACK process callback: pull nframes of sample data from
+// the source into the port buffers, apply the output gain, and
+// report peak levels back to the source for metering.
+int
+AudioJACKTarget::process(jack_nframes_t nframes)
+{
+    // tryLock, not lock: if the GUI thread holds the mutex (port
+    // reconfiguration in sourceModelReplaced) the audio thread must
+    // not block, so we skip this cycle instead
+    if (!m_mutex.tryLock()) {
+	return 0;
+    }
+
+    if (m_outputs.empty()) {
+	m_mutex.unlock();
+	return 0;
+    }
+
+#ifdef DEBUG_AUDIO_JACK_TARGET    
+    std::cout << "AudioJACKTarget::process(" << nframes << "): have a source" << std::endl;
+#endif
+
+#ifdef DEBUG_AUDIO_JACK_TARGET    
+    if (m_bufferSize != nframes) {
+	std::cerr << "WARNING: m_bufferSize != nframes (" << m_bufferSize << " != " << nframes << ")" << std::endl;
+    }
+#endif
+
+    // alloca: stack allocation is acceptable in an RT callback where
+    // heap allocation would not be
+    float **buffers = (float **)alloca(m_outputs.size() * sizeof(float *));
+
+    for (size_t ch = 0; ch < m_outputs.size(); ++ch) {
+	buffers[ch] = (float *)jack_port_get_buffer(m_outputs[ch], nframes);
+    }
+
+    size_t received = 0;
+
+    if (m_source) {
+	received = m_source->getSourceSamples(nframes, buffers);
+    }
+
+    // Zero-fill whatever the source could not supply this cycle
+    for (size_t ch = 0; ch < m_outputs.size(); ++ch) {
+        for (size_t i = received; i < nframes; ++i) {
+            buffers[ch][i] = 0.0;
+        }
+    }
+
+    float peakLeft = 0.0, peakRight = 0.0;
+
+    // Apply gain in place and track the per-channel peak for metering
+    for (size_t ch = 0; ch < m_outputs.size(); ++ch) {
+
+	float peak = 0.0;
+
+	for (size_t i = 0; i < nframes; ++i) {
+	    buffers[ch][i] *= m_outputGain;
+	    float sample = fabsf(buffers[ch][i]);
+	    if (sample > peak) peak = sample;
+	}
+
+	// Mono sources meter the same value on both sides
+	if (ch == 0) peakLeft = peak;
+	if (ch > 0 || m_outputs.size() == 1) peakRight = peak;
+    }
+	    
+    if (m_source) {
+	m_source->setOutputLevels(peakLeft, peakRight);
+    }
+
+    m_mutex.unlock();
+    return 0;
+}
+
+// Called by JACK when an xrun (buffer over/underrun) occurs: warn the
+// source so it can back off its processing load.
+int
+AudioJACKTarget::xrun()
+{
+    std::cerr << "AudioJACKTarget: xrun!" << std::endl;
+    AudioCallbackPlaySource *source = m_source;
+    if (source) {
+        source->audioProcessingOverload();
+    }
+    return 0;
+}
+
+#endif /* HAVE_JACK */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioJACKTarget.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,60 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_JACK_TARGET_H_
+#define _AUDIO_JACK_TARGET_H_
+
+#ifdef HAVE_JACK
+
+#include <jack/jack.h>
+#include <vector>
+
+#include "AudioCallbackPlayTarget.h"
+
+#include <QMutex>
+
+class AudioCallbackPlaySource;
+
+/**
+ * JACK implementation of the audio callback play target.  Registers
+ * one output port per source channel (minimum two, since panning is
+ * offered), auto-connects them to physical input ports where
+ * available, and pulls sample data from the AudioCallbackPlaySource
+ * in the JACK process callback.
+ */
+class AudioJACKTarget : public AudioCallbackPlayTarget
+{
+    Q_OBJECT
+
+public:
+    AudioJACKTarget(AudioCallbackPlaySource *source);
+    virtual ~AudioJACKTarget();
+
+    /// True if the connection to the JACK server succeeded
+    virtual bool isOK() const;
+
+public slots:
+    /// Re-negotiate output ports when the source's model changes
+    virtual void sourceModelReplaced();
+
+protected:
+    int process(jack_nframes_t nframes);
+    int xrun();
+
+    // Static trampolines registered with JACK; arg is `this`
+    static int processStatic(jack_nframes_t, void *);
+    static int xrunStatic(void *);
+
+    jack_client_t              *m_client;
+    std::vector<jack_port_t *>  m_outputs;   // one port per channel
+    jack_nframes_t              m_bufferSize;
+    jack_nframes_t              m_sampleRate;
+    QMutex                      m_mutex;     // guards m_outputs vs process()
+};
+
+#endif /* HAVE_JACK */
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioPortAudioTarget.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,254 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifdef HAVE_PORTAUDIO
+
+#include "AudioPortAudioTarget.h"
+#include "AudioCallbackPlaySource.h"
+
+#include <iostream>
+#include <cassert>
+#include <cmath>
+
+//#define DEBUG_AUDIO_PORT_AUDIO_TARGET 1
+
+// Initialise PortAudio, open and start a stereo float stream, and
+// push block size / sample rate / latency to the source.  On any
+// failure m_stream stays 0 and isOK() reports false.
+AudioPortAudioTarget::AudioPortAudioTarget(AudioCallbackPlaySource *source) :
+    AudioCallbackPlayTarget(source),
+    m_stream(0),
+    m_bufferSize(0),
+    m_sampleRate(0),
+    m_latency(0)
+{
+    PaError err;
+
+#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET
+#ifdef HAVE_PORTAUDIO_V18
+    std::cerr << "AudioPortAudioTarget: Initialising for PortAudio v18" << std::endl;
+#else
+    std::cerr << "AudioPortAudioTarget: Initialising for PortAudio v19" << std::endl;
+#endif
+#endif
+
+    err = Pa_Initialize();
+    if (err != paNoError) {
+	std::cerr << "ERROR: AudioPortAudioTarget: Failed to initialize PortAudio: " << Pa_GetErrorText(err) << std::endl;
+	return;
+    }
+
+    // Fixed block size; sample rate follows the source when known,
+    // falling back to 44100
+    m_bufferSize = 1024;
+    m_sampleRate = 44100;
+    if (m_source && (m_source->getSourceSampleRate() != 0)) {
+	m_sampleRate = m_source->getSourceSampleRate();
+    }
+
+#ifdef HAVE_PORTAUDIO_V18
+    // v18 has no stream-info query; estimate latency from the
+    // minimum buffer count
+    m_latency = Pa_GetMinNumBuffers(m_bufferSize, m_sampleRate) * m_bufferSize;
+#endif
+
+#ifdef HAVE_PORTAUDIO_V18
+    err = Pa_OpenDefaultStream(&m_stream, 0, 2, paFloat32,
+			       m_sampleRate, m_bufferSize, 0,
+			       processStatic, this);
+#else
+    err = Pa_OpenDefaultStream(&m_stream, 0, 2, paFloat32,
+			       m_sampleRate, m_bufferSize,
+			       processStatic, this);
+#endif    
+
+    if (err != paNoError) {
+	std::cerr << "ERROR: AudioPortAudioTarget: Failed to open PortAudio stream: " << Pa_GetErrorText(err) << std::endl;
+	m_stream = 0;
+	Pa_Terminate();
+	return;
+    }
+
+#ifndef HAVE_PORTAUDIO_V18
+    // v19 reports output latency in seconds; convert to frames
+    const PaStreamInfo *info = Pa_GetStreamInfo(m_stream);
+    m_latency = int(info->outputLatency * m_sampleRate + 0.001);
+#endif
+
+    std::cerr << "PortAudio latency = " << m_latency << " frames" << std::endl;
+
+    err = Pa_StartStream(m_stream);
+
+    if (err != paNoError) {
+	std::cerr << "ERROR: AudioPortAudioTarget: Failed to start PortAudio stream: " << Pa_GetErrorText(err) << std::endl;
+	Pa_CloseStream(m_stream);
+	m_stream = 0;
+	Pa_Terminate();
+	return;
+    }
+
+    if (m_source) {
+	std::cerr << "AudioPortAudioTarget: block size " << m_bufferSize << std::endl;
+	m_source->setTargetBlockSize(m_bufferSize);
+	m_source->setTargetSampleRate(m_sampleRate);
+	m_source->setTargetPlayLatency(m_latency);
+    }
+
+#ifdef DEBUG_PORT_AUDIO_TARGET
+    std::cerr << "AudioPortAudioTarget: initialised OK" << std::endl;
+#endif
+}
+
+// Close the stream and shut PortAudio down.  If construction failed,
+// the constructor has already called Pa_Terminate, so we only clean
+// up when a stream was actually opened.
+AudioPortAudioTarget::~AudioPortAudioTarget()
+{
+    if (m_stream) {
+	PaError err;
+	err = Pa_CloseStream(m_stream);
+	if (err != paNoError) {
+	    std::cerr << "ERROR: AudioPortAudioTarget: Failed to close PortAudio stream: " << Pa_GetErrorText(err) << std::endl;
+	}
+	err = Pa_Terminate();
+        if (err != paNoError) {
+            std::cerr << "ERROR: AudioPortAudioTarget: Failed to terminate PortAudio: " << Pa_GetErrorText(err) << std::endl;
+	}   
+    }
+}
+
+/**
+ * Report whether the PortAudio stream was opened and started
+ * successfully; the stream pointer is non-null only in that case.
+ */
+bool
+AudioPortAudioTarget::isOK() const
+{
+    return m_stream != 0;
+}
+
+// Static trampolines matching the v18/v19 PortAudio callback
+// signatures; data carries the target instance.
+#ifdef HAVE_PORTAUDIO_V18
+int
+AudioPortAudioTarget::processStatic(void *input, void *output,
+				    unsigned long nframes,
+				    PaTimestamp outTime, void *data)
+{
+    return ((AudioPortAudioTarget *)data)->process(input, output,
+						   nframes, outTime);
+}
+#else
+int
+AudioPortAudioTarget::processStatic(const void *input, void *output,
+                                    unsigned long nframes,
+                                    const PaStreamCallbackTimeInfo *timeInfo,
+                                    PaStreamCallbackFlags flags, void *data)
+{
+    return ((AudioPortAudioTarget *)data)->process(input, output,
+                                                   nframes, timeInfo,
+                                                   flags);
+}
+#endif
+
+// Re-apply the stream's sample rate to the source when its model
+// changes.  Guard against a null source: the constructor treats
+// m_source as nullable, but this slot previously dereferenced it
+// unconditionally.
+void
+AudioPortAudioTarget::sourceModelReplaced()
+{
+    if (!m_source) return;
+    m_source->setTargetSampleRate(m_sampleRate);
+}
+
+// PortAudio stream callback: pull up to nframes of sample data from
+// the source into a scratch buffer, then interleave it (with gain
+// applied) into the stereo output, duplicating mono to both sides
+// and zero-filling anything the source could not supply.  Also
+// reports peak levels to the source for metering.
+#ifdef HAVE_PORTAUDIO_V18
+int
+AudioPortAudioTarget::process(void *inputBuffer, void *outputBuffer,
+			      unsigned long nframes,
+			      PaTimestamp)
+#else
+int
+AudioPortAudioTarget::process(const void *, void *outputBuffer,
+                              unsigned long nframes,
+                              const PaStreamCallbackTimeInfo *,
+                              PaStreamCallbackFlags)
+#endif
+{
+#ifdef DEBUG_AUDIO_PORT_AUDIO_TARGET    
+    std::cout << "AudioPortAudioTarget::process(" << nframes << ")" << std::endl;
+#endif
+
+    if (!m_source) return 0;
+
+    float *output = (float *)outputBuffer;
+
+    assert(nframes <= m_bufferSize);
+
+    // NOTE(review): these function-local statics are shared between
+    // all AudioPortAudioTarget instances and never freed -- safe only
+    // while a single instance exists at a time; consider members
+    static float **tmpbuf = 0;
+    static size_t tmpbufch = 0;
+    static size_t tmpbufsz = 0;
+
+    size_t sourceChannels = m_source->getSourceChannelCount();
+
+    // Because we offer pan, we always want at least 2 channels
+    if (sourceChannels < 2) sourceChannels = 2;
+
+    // (Re)allocate the de-interleaved scratch buffer if the channel
+    // count or block size has grown
+    if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < m_bufferSize) {
+
+	if (tmpbuf) {
+	    for (size_t i = 0; i < tmpbufch; ++i) {
+		delete[] tmpbuf[i];
+	    }
+	    delete[] tmpbuf;
+	}
+
+	tmpbufch = sourceChannels;
+	tmpbufsz = m_bufferSize;
+	tmpbuf = new float *[tmpbufch];
+
+	for (size_t i = 0; i < tmpbufch; ++i) {
+	    tmpbuf[i] = new float[tmpbufsz];
+	}
+    }
+	
+    size_t received = m_source->getSourceSamples(nframes, tmpbuf);
+
+    float peakLeft = 0.0, peakRight = 0.0;
+
+    for (size_t ch = 0; ch < 2; ++ch) {
+	
+	float peak = 0.0;
+
+	if (ch < sourceChannels) {
+
+	    // PortAudio samples are interleaved
+	    for (size_t i = 0; i < nframes; ++i) {
+                if (i < received) {
+                    output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain;
+                    float sample = fabsf(output[i * 2 + ch]);
+                    if (sample > peak) peak = sample;
+                } else {
+                    output[i * 2 + ch] = 0;
+                }
+	    }
+
+	} else if (ch == 1 && sourceChannels == 1) {
+
+	    // Mono source: duplicate channel 0 to the right side
+	    for (size_t i = 0; i < nframes; ++i) {
+                if (i < received) {
+                    output[i * 2 + ch] = tmpbuf[0][i] * m_outputGain;
+                    float sample = fabsf(output[i * 2 + ch]);
+                    if (sample > peak) peak = sample;
+                } else {
+                    output[i * 2 + ch] = 0;
+                }
+	    }
+
+	} else {
+	    for (size_t i = 0; i < nframes; ++i) {
+		output[i * 2 + ch] = 0;
+	    }
+	}
+
+	if (ch == 0) peakLeft = peak;
+	if (ch > 0 || sourceChannels == 1) peakRight = peak;
+    }
+
+    m_source->setOutputLevels(peakLeft, peakRight);
+
+    return 0;
+}
+
+#endif /* HAVE_PORTAUDIO */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioPortAudioTarget.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,78 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_PORT_AUDIO_TARGET_H_
+#define _AUDIO_PORT_AUDIO_TARGET_H_
+
+#ifdef HAVE_PORTAUDIO
+
+// This code can be compiled for either PortAudio v18 or v19.
+// PortAudio v19 is the default.  If you want to use v18, define
+// the preprocessor symbol HAVE_PORTAUDIO_v18.
+
+#include <portaudio.h>
+#include <vector>
+
+#include "AudioCallbackPlayTarget.h"
+
+class AudioCallbackPlaySource;
+
+/**
+ * PortAudio implementation of the audio callback play target.  Opens
+ * a stereo float stream and pulls sample data from the
+ * AudioCallbackPlaySource in the PortAudio stream callback.  Compiles
+ * against either the v18 or v19 PortAudio API (see above).
+ */
+class AudioPortAudioTarget : public AudioCallbackPlayTarget
+{
+    Q_OBJECT
+
+public:
+    AudioPortAudioTarget(AudioCallbackPlaySource *source);
+    virtual ~AudioPortAudioTarget();
+
+    /// True if the PortAudio stream was opened and started
+    virtual bool isOK() const;
+
+public slots:
+    /// Re-apply sample rate to the source when its model changes
+    virtual void sourceModelReplaced();
+
+protected:
+#ifdef HAVE_PORTAUDIO_V18
+
+    int process(void *input, void *output, unsigned long frames,
+		PaTimestamp outTime);
+
+    // Static trampoline registered with PortAudio; last arg is `this`
+    static int processStatic(void *, void *, unsigned long,
+			     PaTimestamp, void *);
+
+    PortAudioStream *m_stream;
+
+#else
+
+    int process(const void *input, void *output, unsigned long frames,
+                const PaStreamCallbackTimeInfo *timeInfo,
+                PaStreamCallbackFlags statusFlags);
+
+    // Static trampoline registered with PortAudio; last arg is `this`
+    static int processStatic(const void *, void *, unsigned long,
+                             const PaStreamCallbackTimeInfo *,
+                             PaStreamCallbackFlags, void *);
+
+    PaStream *m_stream;
+
+#endif
+
+    int m_bufferSize;   // frames per callback
+    int m_sampleRate;
+    int m_latency;      // output latency in frames
+};
+
+#endif /* HAVE_PORTAUDIO */
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioTargetFactory.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,69 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "AudioTargetFactory.h"
+
+#include "AudioJACKTarget.h"
+#include "AudioCoreAudioTarget.h"
+#include "AudioPortAudioTarget.h"
+
+#include <iostream>
+
+// Construct the first available audio target for the given source,
+// trying drivers in preference order (JACK, CoreAudio, DirectSound,
+// PortAudio) as enabled at build time.  Each candidate that fails
+// isOK() is deleted and the next is tried.  Returns 0 (with a
+// warning) if no target could be opened; the caller owns the result.
+// NOTE(review): the HAVE_DIRECTSOUND branch references
+// AudioDirectSoundTarget, but no corresponding header is included in
+// this file -- confirm that builds defining HAVE_DIRECTSOUND compile.
+AudioCallbackPlayTarget *
+AudioTargetFactory::createCallbackTarget(AudioCallbackPlaySource *source)
+{
+    AudioCallbackPlayTarget *target = 0;
+
+#ifdef HAVE_JACK
+    target = new AudioJACKTarget(source);
+    if (target->isOK()) return target;
+    else {
+	std::cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open JACK target" << std::endl;
+	delete target;
+    }
+#endif
+
+#ifdef HAVE_COREAUDIO
+    target = new AudioCoreAudioTarget(source);
+    if (target->isOK()) return target;
+    else {
+	std::cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open CoreAudio target" << std::endl;
+	delete target;
+    }
+#endif
+
+#ifdef HAVE_DIRECTSOUND
+    target = new AudioDirectSoundTarget(source);
+    if (target->isOK()) return target;
+    else {
+	std::cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open DirectSound target" << std::endl;
+	delete target;
+    }
+#endif
+
+#ifdef HAVE_PORTAUDIO
+    target = new AudioPortAudioTarget(source);
+    if (target->isOK()) return target;
+    else {
+	std::cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open PortAudio target" << std::endl;
+	delete target;
+    }
+#endif
+
+    std::cerr << "WARNING: AudioTargetFactory::createCallbackTarget: No suitable targets available" << std::endl;
+    return 0;
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/AudioTargetFactory.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,29 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _AUDIO_TARGET_FACTORY_H_
+#define _AUDIO_TARGET_FACTORY_H_
+
+class AudioCallbackPlaySource;
+class AudioCallbackPlayTarget;
+
+/**
+ * Factory for concrete audio playback targets.  Tries the available
+ * drivers in preference order and returns the first that opens
+ * successfully, or 0 if none does; the caller owns the result.
+ */
+class AudioTargetFactory 
+{
+public:
+    static AudioCallbackPlayTarget *createCallbackTarget(AudioCallbackPlaySource *);
+};
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/PhaseVocoderTimeStretcher.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,626 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "PhaseVocoderTimeStretcher.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <QMutexLocker>
+
+//#define DEBUG_PHASE_VOCODER_TIME_STRETCHER 1
+
/**
 * Construct a time stretcher for the given channel count and sample
 * rate, with initial stretch ratio "ratio".  If "sharpen" is true,
 * percussive transient detection is enabled (where the ratio permits
 * -- see shouldSharpen()).  maxOutputBlockSize is the largest block
 * the caller will request in a single getOutput() call, and sizes the
 * internal ring buffers.  All buffer and plan allocation happens in
 * initialise().
 */
PhaseVocoderTimeStretcher::PhaseVocoderTimeStretcher(size_t sampleRate,
                                                     size_t channels,
                                                     float ratio,
                                                     bool sharpen,
                                                     size_t maxOutputBlockSize) :
    m_sampleRate(sampleRate),
    m_channels(channels),
    m_maxOutputBlockSize(maxOutputBlockSize),
    m_ratio(ratio),
    m_sharpen(sharpen),
    m_totalCount(0),
    m_transientCount(0),
    m_n2sum(0),
    m_mutex(new QMutex())
{
    initialise();
}
+
PhaseVocoderTimeStretcher::~PhaseVocoderTimeStretcher()
{
    std::cerr << "PhaseVocoderTimeStretcher::~PhaseVocoderTimeStretcher" << std::endl;

    // Release all buffers and FFT plans before destroying the mutex
    // that guards them
    cleanup();
    
    delete m_mutex;
}
+
void
PhaseVocoderTimeStretcher::initialise()
{
    // Allocate all per-channel state -- windows, FFT plans, phase
    // history, ring buffers -- according to the parameters chosen by
    // calculateParameters().  cleanup() releases everything in the
    // opposite sense; setRatio() may call both to rebuild state when
    // the window length changes.
    std::cerr << "PhaseVocoderTimeStretcher::initialise" << std::endl;

    calculateParameters();
        
    m_analysisWindow = new Window<float>(HanningWindow, m_wlen);
    m_synthesisWindow = new Window<float>(HanningWindow, m_wlen);

    m_prevPhase = new float *[m_channels];
    m_prevAdjustedPhase = new float *[m_channels];

    // m_wlen/2 + 1 bins, as produced by a real-to-complex transform
    // of size m_wlen
    m_prevTransientMag = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));
    m_prevTransientScore = 0;
    m_prevTransient = false;

    m_tempbuf = (float *)fftf_malloc(sizeof(float) * m_wlen);

    m_time = new float *[m_channels];
    m_freq = new fftf_complex *[m_channels];
    m_plan = new fftf_plan[m_channels];
    m_iplan = new fftf_plan[m_channels];

    m_inbuf = new RingBuffer<float> *[m_channels];
    m_outbuf = new RingBuffer<float> *[m_channels];
    m_mashbuf = new float *[m_channels];

    m_modulationbuf = (float *)fftf_malloc(sizeof(float) * m_wlen);
        
    for (size_t c = 0; c < m_channels; ++c) {

        m_prevPhase[c] = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));
        m_prevAdjustedPhase[c] = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));

        m_time[c] = (float *)fftf_malloc(sizeof(float) * m_wlen);
        m_freq[c] = (fftf_complex *)fftf_malloc(sizeof(fftf_complex) *
                                                  (m_wlen / 2 + 1));
        
        // Forward and inverse plans operate in place on the same
        // time/frequency buffer pair
        m_plan[c] = fftf_plan_dft_r2c_1d(m_wlen, m_time[c], m_freq[c], FFTW_MEASURE);
        m_iplan[c] = fftf_plan_dft_c2r_1d(m_wlen, m_freq[c], m_time[c], FFTW_MEASURE);

        // Output buffer is sized from the caller's maximum block size;
        // the input buffer is scaled by the inverse of the stretch
        // ratio (plus one window) so a full output buffer's worth of
        // input can always be queued
        m_outbuf[c] = new RingBuffer<float>
            ((m_maxOutputBlockSize + m_wlen) * 2);
        m_inbuf[c] = new RingBuffer<float>
            (lrintf(m_outbuf[c]->getSize() / m_ratio) + m_wlen);

        std::cerr << "making inbuf size " << m_inbuf[c]->getSize() << " (outbuf size is " << m_outbuf[c]->getSize() << ", ratio " << m_ratio << ")" << std::endl;

           
        m_mashbuf[c] = (float *)fftf_malloc(sizeof(float) * m_wlen);
        
        for (size_t i = 0; i < m_wlen; ++i) {
            m_mashbuf[c][i] = 0.0;
        }

        for (size_t i = 0; i <= m_wlen/2; ++i) {
            m_prevPhase[c][i] = 0.0;
            m_prevAdjustedPhase[c][i] = 0.0;
        }
    }

    for (size_t i = 0; i < m_wlen; ++i) {
        m_modulationbuf[i] = 0.0;
    }

    for (size_t i = 0; i <= m_wlen/2; ++i) {
        m_prevTransientMag[i] = 0.0;
    }
}
+
+void
+PhaseVocoderTimeStretcher::calculateParameters()
+{
+    std::cerr << "PhaseVocoderTimeStretcher::calculateParameters" << std::endl;
+
+    m_wlen = 1024;
+
+    //!!! In transient sharpening mode, we need to pick the window
+    //length so as to be more or less fixed in audio duration (i.e. we
+    //need to exploit the sample rate)
+
+    //!!! have to work out the relationship between wlen and transient
+    //threshold
+
+    if (m_ratio < 1) {
+        if (m_ratio < 0.4) {
+            m_n1 = 1024;
+            m_wlen = 2048;
+        } else if (m_ratio < 0.8) {
+            m_n1 = 512;
+        } else {
+            m_n1 = 256;
+        }
+        if (shouldSharpen()) {
+            m_wlen = 2048;
+        }
+        m_n2 = lrintf(m_n1 * m_ratio);
+    } else {
+        if (m_ratio > 2) {
+            m_n2 = 512;
+            m_wlen = 4096; 
+        } else if (m_ratio > 1.6) {
+            m_n2 = 384;
+            m_wlen = 2048;
+        } else {
+            m_n2 = 256;
+        }
+        if (shouldSharpen()) {
+            if (m_wlen < 2048) m_wlen = 2048;
+        }
+        m_n1 = lrintf(m_n2 / m_ratio);
+        if (m_n1 == 0) {
+            m_n1 = 1;
+            m_n2 = lrintf(m_ratio);
+        }
+    }
+
+    m_transientThreshold = lrintf(m_wlen / 4.5);
+
+    m_totalCount = 0;
+    m_transientCount = 0;
+    m_n2sum = 0;
+
+
+    std::cerr << "PhaseVocoderTimeStretcher: channels = " << m_channels
+              << ", ratio = " << m_ratio
+              << ", n1 = " << m_n1 << ", n2 = " << m_n2 << ", wlen = "
+              << m_wlen << ", max = " << m_maxOutputBlockSize << std::endl;
+//              << ", outbuflen = " << m_outbuf[0]->getSize() << std::endl;
+}
+
void
PhaseVocoderTimeStretcher::cleanup()
{
    // Release everything allocated by initialise(): per-channel FFT
    // plans and buffers first, then the shared scratch buffers, then
    // the channel pointer arrays themselves, and finally the windows.
    std::cerr << "PhaseVocoderTimeStretcher::cleanup" << std::endl;

    for (size_t c = 0; c < m_channels; ++c) {

        fftf_destroy_plan(m_plan[c]);
        fftf_destroy_plan(m_iplan[c]);

        fftf_free(m_time[c]);
        fftf_free(m_freq[c]);

        fftf_free(m_mashbuf[c]);
        fftf_free(m_prevPhase[c]);
        fftf_free(m_prevAdjustedPhase[c]);

        delete m_inbuf[c];
        delete m_outbuf[c];
    }

    fftf_free(m_tempbuf);
    fftf_free(m_modulationbuf);
    fftf_free(m_prevTransientMag);

    delete[] m_prevPhase;
    delete[] m_prevAdjustedPhase;
    delete[] m_inbuf;
    delete[] m_outbuf;
    delete[] m_mashbuf;
    delete[] m_time;
    delete[] m_freq;
    delete[] m_plan;
    delete[] m_iplan;

    delete m_analysisWindow;
    delete m_synthesisWindow;
}
+
+void
+PhaseVocoderTimeStretcher::setRatio(float ratio)
+{
+    QMutexLocker locker(m_mutex);
+
+    size_t formerWlen = m_wlen;
+    m_ratio = ratio;
+
+    std::cerr << "PhaseVocoderTimeStretcher::setRatio: new ratio " << ratio
+              << std::endl;
+
+    calculateParameters();
+
+    if (m_wlen == formerWlen) {
+
+        // This is the only container whose size depends on m_ratio
+
+        RingBuffer<float> **newin = new RingBuffer<float> *[m_channels];
+
+        size_t formerSize = m_inbuf[0]->getSize();
+        size_t newSize = lrintf(m_outbuf[0]->getSize() / m_ratio) + m_wlen;
+
+        std::cerr << "resizing inbuf from " << formerSize << " to "
+                  << newSize << " (outbuf size is " << m_outbuf[0]->getSize() << ", ratio " << m_ratio << ")" << std::endl;
+
+        if (formerSize != newSize) {
+
+            size_t ready = m_inbuf[0]->getReadSpace();
+
+            for (size_t c = 0; c < m_channels; ++c) {
+                newin[c] = new RingBuffer<float>(newSize);
+            }
+
+            if (ready > 0) {
+
+                size_t copy = std::min(ready, newSize);
+                float *tmp = new float[ready];
+
+                for (size_t c = 0; c < m_channels; ++c) {
+                    m_inbuf[c]->read(tmp, ready);
+                    newin[c]->write(tmp + ready - copy, copy);
+                }
+                
+                delete[] tmp;
+            }
+            
+            for (size_t c = 0; c < m_channels; ++c) {
+                delete m_inbuf[c];
+            }
+            
+            delete[] m_inbuf;
+            m_inbuf = newin;
+        }
+
+    } else {
+        
+        std::cerr << "wlen changed" << std::endl;
+        cleanup();
+        initialise();
+    }
+}
+
+size_t
+PhaseVocoderTimeStretcher::getProcessingLatency() const
+{
+    return getWindowSize() - getInputIncrement();
+}
+
+size_t
+PhaseVocoderTimeStretcher::getRequiredInputSamples() const
+{
+    QMutexLocker locker(m_mutex);
+
+    if (m_inbuf[0]->getReadSpace() >= m_wlen) return 0;
+    return m_wlen - m_inbuf[0]->getReadSpace();
+}
+
+void
+PhaseVocoderTimeStretcher::putInput(float **input, size_t samples)
+{
+    QMutexLocker locker(m_mutex);
+
+    // We need to add samples from input to our internal buffer.  When
+    // we have m_windowSize samples in the buffer, we can process it,
+    // move the samples back by m_n1 and write the output onto our
+    // internal output buffer.  If we have (samples * ratio) samples
+    // in that, we can write m_n2 of them back to output and return
+    // (otherwise we have to write zeroes).
+
+    // When we process, we write m_wlen to our fixed output buffer
+    // (m_mashbuf).  We then pull out the first m_n2 samples from that
+    // buffer, push them into the output ring buffer, and shift
+    // m_mashbuf left by that amount.
+
+    // The processing latency is then m_wlen - m_n2.
+
+    size_t consumed = 0;
+
+    while (consumed < samples) {
+
+	size_t writable = m_inbuf[0]->getWriteSpace();
+	writable = std::min(writable, samples - consumed);
+
+	if (writable == 0) {
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+	    std::cerr << "WARNING: PhaseVocoderTimeStretcher::putInput: writable == 0 (inbuf has " << m_inbuf[0]->getReadSpace() << " samples available for reading, space for " << m_inbuf[0]->getWriteSpace() << " more)" << std::endl;
+#endif
+            if (m_inbuf[0]->getReadSpace() < m_wlen ||
+                m_outbuf[0]->getWriteSpace() < m_n2) {
+                std::cerr << "WARNING: PhaseVocoderTimeStretcher::putInput: Inbuf has " << m_inbuf[0]->getReadSpace() << ", outbuf has space for " << m_outbuf[0]->getWriteSpace() << " (n2 = " << m_n2 << ", wlen = " << m_wlen << "), won't be able to process" << std::endl;
+                break;
+            }
+	} else {
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+            std::cerr << "writing " << writable << " from index " << consumed << " to inbuf, consumed will be " << consumed + writable << std::endl;
+#endif
+
+            for (size_t c = 0; c < m_channels; ++c) {
+                m_inbuf[c]->write(input[c] + consumed, writable);
+            }
+            consumed += writable;
+        }
+
+	while (m_inbuf[0]->getReadSpace() >= m_wlen &&
+	       m_outbuf[0]->getWriteSpace() >= m_n2) {
+
+	    // We know we have at least m_wlen samples available
+	    // in m_inbuf.  We need to peek m_wlen of them for
+	    // processing, and then read m_n1 to advance the read
+	    // pointer.
+            
+            for (size_t c = 0; c < m_channels; ++c) {
+
+                size_t got = m_inbuf[c]->peek(m_tempbuf, m_wlen);
+                assert(got == m_wlen);
+
+                analyseBlock(c, m_tempbuf);
+            }
+
+            bool transient = false;
+            if (shouldSharpen()) transient = isTransient();
+
+            size_t n2 = m_n2;
+
+            if (transient) {
+                n2 = m_n1;
+            }
+
+            ++m_totalCount;
+            if (transient) ++m_transientCount;
+            m_n2sum += n2;
+
+//            std::cerr << "ratio for last 10: " <<last10num << "/" << (10 * m_n1) << " = " << float(last10num) / float(10 * m_n1) << " (should be " << m_ratio << ")" << std::endl;
+            
+            if (m_totalCount > 50 && m_transientCount < m_totalCount) {
+
+                int fixed = lrintf(m_transientCount * m_n1);
+
+                int idealTotal = lrintf(m_totalCount * m_n1 * m_ratio);
+                int idealSquashy = idealTotal - fixed;
+
+                int squashyCount = m_totalCount - m_transientCount;
+                
+                n2 = lrintf(idealSquashy / squashyCount);
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+                if (n2 != m_n2) {
+                    std::cerr << m_n2 << " -> " << n2 << std::endl;
+                }
+#endif
+            }
+
+            for (size_t c = 0; c < m_channels; ++c) {
+
+                synthesiseBlock(c, m_mashbuf[c],
+                                c == 0 ? m_modulationbuf : 0,
+                                m_prevTransient ? m_n1 : m_n2);
+
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+                std::cerr << "writing first " << m_n2 << " from mashbuf, skipping " << m_n1 << " on inbuf " << std::endl;
+#endif
+                m_inbuf[c]->skip(m_n1);
+
+                for (size_t i = 0; i < n2; ++i) {
+                    if (m_modulationbuf[i] > 0.f) {
+                        m_mashbuf[c][i] /= m_modulationbuf[i];
+                    }
+                }
+
+                m_outbuf[c]->write(m_mashbuf[c], n2);
+
+                for (size_t i = 0; i < m_wlen - n2; ++i) {
+                    m_mashbuf[c][i] = m_mashbuf[c][i + n2];
+                }
+
+                for (size_t i = m_wlen - n2; i < m_wlen; ++i) {
+                    m_mashbuf[c][i] = 0.0f;
+                }
+            }
+
+            m_prevTransient = transient;
+
+            for (size_t i = 0; i < m_wlen - n2; ++i) {
+                m_modulationbuf[i] = m_modulationbuf[i + n2];
+	    }
+
+	    for (size_t i = m_wlen - n2; i < m_wlen; ++i) {
+                m_modulationbuf[i] = 0.0f;
+	    }
+
+            if (!transient) m_n2 = n2;
+	}
+
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+	std::cerr << "loop ended: inbuf read space " << m_inbuf[0]->getReadSpace() << ", outbuf write space " << m_outbuf[0]->getWriteSpace() << std::endl;
+#endif
+    }
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+    std::cerr << "PhaseVocoderTimeStretcher::putInput returning" << std::endl;
+#endif
+
+//    std::cerr << "ratio: nominal: " << getRatio() << " actual: "
+//              << m_total2 << "/" << m_total1 << " = " << float(m_total2) / float(m_total1) << " ideal: " << m_ratio << std::endl;
+}
+
+size_t
+PhaseVocoderTimeStretcher::getAvailableOutputSamples() const
+{
+    QMutexLocker locker(m_mutex);
+
+    return m_outbuf[0]->getReadSpace();
+}
+
void
PhaseVocoderTimeStretcher::getOutput(float **output, size_t samples)
{
    QMutexLocker locker(m_mutex);

    if (m_outbuf[0]->getReadSpace() < samples) {
        // Underrun: zero-pad the start of each channel's block, then
        // drain whatever is actually buffered into the remainder.
        // NOTE(review): "fill" is computed from channel 0 only; this
        // assumes all channels hold the same number of readable
        // samples -- putInput() writes them in lockstep, but confirm
        // no other path can desynchronise the channels.
        std::cerr << "WARNING: PhaseVocoderTimeStretcher::getOutput: not enough data (yet?) (" << m_outbuf[0]->getReadSpace() << " < " << samples << ")" << std::endl;
        size_t fill = samples - m_outbuf[0]->getReadSpace();
        for (size_t c = 0; c < m_channels; ++c) {
            for (size_t i = 0; i < fill; ++i) {
                output[c][i] = 0.0;
            }
            m_outbuf[c]->read(output[c] + fill, m_outbuf[c]->getReadSpace());
        }
    } else {
#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
        std::cerr << "enough data - writing " << samples << " from outbuf" << std::endl;
#endif
        for (size_t c = 0; c < m_channels; ++c) {
            m_outbuf[c]->read(output[c], samples);
        }
    }

#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
    std::cerr << "PhaseVocoderTimeStretcher::getOutput returning" << std::endl;
#endif
}
+
+void
+PhaseVocoderTimeStretcher::analyseBlock(size_t c, float *buf)
+{
+    size_t i;
+
+    // buf contains m_wlen samples
+
+#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
+    std::cerr << "PhaseVocoderTimeStretcher::analyseBlock (channel " << c << ")" << std::endl;
+#endif
+
+    m_analysisWindow->cut(buf);
+
+    for (i = 0; i < m_wlen/2; ++i) {
+	float temp = buf[i];
+	buf[i] = buf[i + m_wlen/2];
+	buf[i + m_wlen/2] = temp;
+    }
+
+    for (i = 0; i < m_wlen; ++i) {
+	m_time[c][i] = buf[i];
+    }
+
+    fftf_execute(m_plan[c]); // m_time -> m_freq
+}
+
+bool
+PhaseVocoderTimeStretcher::isTransient()
+{
+    int count = 0;
+
+    for (size_t i = 0; i <= m_wlen/2; ++i) {
+
+        float real = 0.f, imag = 0.f;
+
+        for (size_t c = 0; c < m_channels; ++c) {
+            real += m_freq[c][i][0];
+            imag += m_freq[c][i][1];
+        }
+
+        float sqrmag = (real * real + imag * imag);
+
+        if (m_prevTransientMag[i] > 0.f) {
+            float diff = 10.f * log10f(sqrmag / m_prevTransientMag[i]);
+            if (diff > 3.f) ++count;
+        }
+
+        m_prevTransientMag[i] = sqrmag;
+    }
+
+    bool isTransient = false;
+
+//    if (count > m_transientThreshold &&
+//        count > m_prevTransientScore * 1.2) {
+    if (count > m_prevTransientScore &&
+        count > m_transientThreshold &&
+        count - m_prevTransientScore > int(m_wlen) / 20) {
+        isTransient = true;
+
+
+//        std::cerr << "isTransient (count = " << count << ", prev = " << m_prevTransientScore << ", diff = " << count - m_prevTransientScore << ", ratio = " << (m_totalCount > 0 ? (float (m_n2sum) / float(m_totalCount * m_n1)) : 1.f) << ", ideal = " << m_ratio << ")" << std::endl;
+//    } else {
+//        std::cerr << " !transient (count = " << count << ", prev = " << m_prevTransientScore << ", diff = " << count - m_prevTransientScore << ")" << std::endl;
+    }
+
+    m_prevTransientScore = count;
+
+    return isTransient;
+}
+
void
PhaseVocoderTimeStretcher::synthesiseBlock(size_t c,
                                           float *out,
                                           float *modulation,
                                           size_t lastStep)
{
    // If the output step equals the input hop, no stretch is being
    // applied to this block and the phases can pass through unchanged
    bool unchanged = (lastStep == m_n1);

    for (size_t i = 0; i <= m_wlen/2; ++i) {

        float phase = princargf(atan2f(m_freq[c][i][1], m_freq[c][i][0]));
        float adjustedPhase = phase;

        if (!unchanged) {

            // Nominal phase advance of this bin over one input hop
            float omega = (2 * M_PI * m_n1 * i) / m_wlen;
	
            float expectedPhase = m_prevPhase[c][i] + omega;

            // Deviation from the nominal advance, wrapped by princargf
            float phaseError = princargf(phase - expectedPhase);

            // True per-sample phase increment for this bin
            float phaseIncrement = (omega + phaseError) / m_n1;
            
            // Accumulate phase at the output step rate instead
            adjustedPhase = m_prevAdjustedPhase[c][i] +
                lastStep * phaseIncrement;
            
            float mag = sqrtf(m_freq[c][i][0] * m_freq[c][i][0] +
                              m_freq[c][i][1] * m_freq[c][i][1]);
            
            // Rebuild the bin with the original magnitude and the
            // adjusted phase
            float real = mag * cosf(adjustedPhase);
            float imag = mag * sinf(adjustedPhase);
            m_freq[c][i][0] = real;
            m_freq[c][i][1] = imag;
        }

        m_prevPhase[c][i] = phase;
        m_prevAdjustedPhase[c][i] = adjustedPhase;
    }

    fftf_execute(m_iplan[c]); // m_freq -> m_time, inverse fft

    // Undo the half-window rotation applied in analyseBlock
    for (size_t i = 0; i < m_wlen/2; ++i) {
        float temp = m_time[c][i];
        m_time[c][i] = m_time[c][i + m_wlen/2];
        m_time[c][i + m_wlen/2] = temp;
    }
    
    // Rescale: the inverse transform result is divided by m_wlen here
    for (size_t i = 0; i < m_wlen; ++i) {
        m_time[c][i] = m_time[c][i] / m_wlen;
    }

    m_synthesisWindow->cut(m_time[c]);

    // Overlap-add into the caller's accumulation buffer
    for (size_t i = 0; i < m_wlen; ++i) {
        out[i] += m_time[c][i];
    }

    if (modulation) {

        // Accumulate the synthesis-window shape (scaled by the
        // analysis window area) so the caller can divide it out
        // again, correcting the overall magnitude
        float area = m_analysisWindow->getArea();

        for (size_t i = 0; i < m_wlen; ++i) {
            float val = m_synthesisWindow->getValue(i);
            modulation[i] += val * area;
        }
    }
}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/PhaseVocoderTimeStretcher.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,187 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam and QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _PHASE_VOCODER_TIME_STRETCHER_H_
+#define _PHASE_VOCODER_TIME_STRETCHER_H_
+
+#include "base/Window.h"
+#include "base/RingBuffer.h"
+
+#include "data/fft/FFTapi.h"
+
+#include <QMutex>
+
+/**
+ * A time stretcher that alters the performance speed of audio,
+ * preserving pitch.
+ *
+ * This is based on the straightforward phase vocoder with phase
+ * unwrapping (as in e.g. the DAFX book pp275-), with optional
+ * percussive transient detection to avoid smearing percussive notes
+ * and resynchronise phases, and adding a stream API for real-time
+ * use.  Principles and methods from Chris Duxbury, AES 2002 and 2004
+ * thesis; Emmanuel Ravelli, DAFX 2005; Dan Barry, ISSC 2005 on
+ * percussion detection; code by Chris Cannam.
+ */
+
class PhaseVocoderTimeStretcher
{
public:
    /**
     * Construct a stretcher for the given channel count and sample
     * rate, with initial stretch ratio "ratio".  If "sharpen" is
     * true, percussive transient detection is enabled (where the
     * ratio permits).  maxOutputBlockSize is the largest block the
     * caller will request from getOutput() in one call, and sizes
     * the internal buffers.
     */
    PhaseVocoderTimeStretcher(size_t sampleRate,
                              size_t channels,
                              float ratio,
                              bool sharpen,
                              size_t maxOutputBlockSize);
    virtual ~PhaseVocoderTimeStretcher();

    /**
     * Return the number of samples that would need to be added via
     * putInput in order to provoke the time stretcher into doing some
     * time stretching and making more output samples available.
     * This will be an estimate, if transient sharpening is on; the 
     * caller may need to do the put/get/test cycle more than once.
     */
    size_t getRequiredInputSamples() const;

    /**
     * Put (and possibly process) a given number of input samples.
     * Number should usually equal the value returned from
     * getRequiredInputSamples().
     */
    void putInput(float **input, size_t samples);

    /**
     * Get the number of processed samples ready for reading.
     */
    size_t getAvailableOutputSamples() const;

    /**
     * Get some processed samples.
     */
    void getOutput(float **output, size_t samples);

    //!!! and reset?

    /**
     * Change the time stretch ratio.
     */
    void setRatio(float ratio);

    /**
     * Get the hop size for input.
     */
    size_t getInputIncrement() const { return m_n1; }

    /**
     * Get the hop size for output.
     */
    size_t getOutputIncrement() const { return m_n2; }

    /**
     * Get the window size for FFT processing.
     */
    size_t getWindowSize() const { return m_wlen; }

    /**
     * Get the stretch ratio.
     */
    float getRatio() const { return float(m_n2) / float(m_n1); }

    /**
     * Return whether this time stretcher will attempt to sharpen transients.
     */
    bool getSharpening() const { return m_sharpen; }

    /**
     * Return the number of channels for this time stretcher.
     */
    size_t getChannelCount() const { return m_channels; }

    /**
     * Get the latency added by the time stretcher, in sample frames.
     * This will be exact if transient sharpening is off, or approximate
     * if it is on.
     */
    size_t getProcessingLatency() const;

protected:
    /**
     * Process a single phase vocoder frame from "in" into
     * m_freq[channel].
     */
    void analyseBlock(size_t channel, float *in); // into m_freq[channel]

    /**
     * Examine m_freq[0..m_channels-1] and return whether a percussive
     * transient is found.
     */
    bool isTransient(); 

    /**
     * Resynthesise from m_freq[channel] adding in to "out",
     * adjusting phases on the basis of a prior step size of lastStep.
     * Also add the window shape in to the modulation array (if
     * present) -- for use in ensuring the output has the correct
     * magnitude afterwards.
     */
    void synthesiseBlock(size_t channel, float *out, float *modulation,
                         size_t lastStep);

    void initialise();
    void calculateParameters();
    void cleanup();

    // Sharpening is only attempted for ratios above 0.25
    bool shouldSharpen() {
        return m_sharpen && (m_ratio > 0.25);
    }

    size_t m_sampleRate;           // source sample rate (Hz)
    size_t m_channels;             // number of audio channels
    size_t m_maxOutputBlockSize;   // largest single getOutput() request
    float m_ratio;                 // requested stretch ratio
    bool m_sharpen;                // transient sharpening requested?
    size_t m_n1;                   // analysis (input) hop size
    size_t m_n2;                   // synthesis (output) hop size
    size_t m_wlen;                 // FFT window length
    Window<float> *m_analysisWindow;   // window applied before the FFT
    Window<float> *m_synthesisWindow;  // window applied after the inverse FFT

    int m_totalCount;       // blocks processed since the last parameter change
    int m_transientCount;   // of those, blocks judged transient
    int m_n2sum;            // sum of the output hops actually used

    float **m_prevPhase;           // per channel: previous block's bin phases
    float **m_prevAdjustedPhase;   // per channel: accumulated output phases

    float *m_prevTransientMag;     // previous block's channel-summed power per bin
    int  m_prevTransientScore;     // previous block's rising-bin count
    int  m_transientThreshold;     // rising-bin count required for a transient
    bool m_prevTransient;          // was the previous block a transient?

    float *m_tempbuf;              // scratch block of m_wlen samples
    float **m_time;                // per channel: FFT time-domain buffer
    fftf_complex **m_freq;         // per channel: FFT frequency-domain buffer
    fftf_plan *m_plan;             // per channel: forward FFT plan
    fftf_plan *m_iplan;            // per channel: inverse FFT plan
    
    RingBuffer<float> **m_inbuf;   // per channel: buffered input samples
    RingBuffer<float> **m_outbuf;  // per channel: processed output samples
    float **m_mashbuf;             // per channel: overlap-add accumulator
    float *m_modulationbuf;        // accumulated window envelope for rescaling

    QMutex *m_mutex;               // guards all of the above across API calls
};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/PlaySpeedRangeMapper.cpp	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,133 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "PlaySpeedRangeMapper.h"
+
+#include <iostream>
+#include <cmath>
+
// Map between integer control positions in [minpos, maxpos] and play
// speed percentages.  Positions below the midpoint of the range map
// to slower-than-normal playback, positions above it to faster.
PlaySpeedRangeMapper::PlaySpeedRangeMapper(int minpos, int maxpos) :
    m_minpos(minpos),
    m_maxpos(maxpos)
{
}
+
+int
+PlaySpeedRangeMapper::getPositionForValue(float value) const
+{
+    // value is percent
+    float factor = getFactorForValue(value);
+    int position = getPositionForFactor(factor);
+    return position;
+}
+
+int
+PlaySpeedRangeMapper::getPositionForFactor(float factor) const
+{
+    bool slow = (factor > 1.0);
+
+    if (!slow) factor = 1.0 / factor;
+    
+    int half = (m_maxpos + m_minpos) / 2;
+
+    factor = sqrtf((factor - 1.0) * 1000.f);
+    int position = lrintf(((factor * (half - m_minpos)) / 100.0) + m_minpos);
+
+    if (slow) {
+        position = half - position;
+    } else {
+        position = position + half;
+    }
+
+//    std::cerr << "value = " << value << " slow = " << slow << " factor = " << factor << " position = " << position << std::endl;
+
+    return position;
+}
+
+float
+PlaySpeedRangeMapper::getValueForPosition(int position) const
+{
+    float factor = getFactorForPosition(position);
+    float pc = getValueForFactor(factor);
+    return pc;
+}
+
+float
+PlaySpeedRangeMapper::getValueForFactor(float factor) const
+{
+    float pc;
+    if (factor < 1.0) pc = ((1.0 / factor) - 1.0) * 100.0;
+    else pc = (1.0 - factor) * 100.0;
+//    std::cerr << "position = " << position << " percent = " << pc << std::endl;
+    return pc;
+}
+
+float
+PlaySpeedRangeMapper::getFactorForValue(float value) const
+{
+    // value is percent
+    
+    float factor;
+
+    if (value <= 0) {
+        factor = 1.0 - (value / 100.0);
+    } else {
+        factor = 1.0 / (1.0 + (value / 100.0));
+    }
+
+//    std::cerr << "value = " << value << " factor = " << factor << std::endl;
+    return factor;
+}
+
float
PlaySpeedRangeMapper::getFactorForPosition(int position) const
{
    // Convert a control position to a playback time factor: positions
    // below the midpoint of the range give factors > 1 (slower),
    // positions above give factors < 1 (faster), with a quadratic
    // response away from the midpoint.
    bool slow = false;

    // Clamp to the valid position range
    if (position < m_minpos) position = m_minpos;
    if (position > m_maxpos) position = m_maxpos;

    int half = (m_maxpos + m_minpos) / 2;

    // Fold the lower (slow) half of the range onto the upper one so a
    // single formula serves both directions
    if (position < half) {
        slow = true;
        position = half - position;
    } else {
        position = position - half;
    }

    // position is between min and half (inclusive)
    // NOTE(review): after folding, position is actually an offset from
    // the midpoint; the m_minpos comparison below only matches the
    // range endpoints exactly when m_minpos == 0 -- confirm intended.

    float factor;

    if (position == m_minpos) {
        factor = 1.0;
    } else {
        factor = ((position - m_minpos) * 100.0) / (half - m_minpos);
        factor = 1.0 + (factor * factor) / 1000.f;
    }

    // Invert for the fast side of the range
    if (!slow) factor = 1.0 / factor;

    return factor;
}
+
+QString
+PlaySpeedRangeMapper::getUnit() const
+{
+    return "%";
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/PlaySpeedRangeMapper.h	Wed Oct 24 16:34:31 2007 +0000
@@ -0,0 +1,43 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef _PLAY_SPEED_RANGE_MAPPER_H_
+#define _PLAY_SPEED_RANGE_MAPPER_H_
+
+#include "base/RangeMapper.h"
+
/**
 * A RangeMapper that converts between an integer control position
 * (e.g. a slider in [minpos, maxpos]) and a play speed expressed as a
 * percentage change, via an intermediate playback time "factor".
 * The mapping is nonlinear: quadratic on either side of the midpoint
 * of the position range.
 */
class PlaySpeedRangeMapper : public RangeMapper
{
public:
    PlaySpeedRangeMapper(int minpos, int maxpos);

    // RangeMapper interface: percentage value <-> position
    virtual int getPositionForValue(float value) const;
    virtual float getValueForPosition(int position) const;

    // Position <-> playback time factor (factor > 1 is slower)
    int getPositionForFactor(float factor) const;
    float getValueForFactor(float factor) const;

    float getFactorForPosition(int position) const;
    float getFactorForValue(float value) const;

    // Returns "%"
    virtual QString getUnit() const;
    
protected:
    int m_minpos;   // lowest valid control position
    int m_maxpos;   // highest valid control position
};
+
+
+#endif