changeset 100:22bf057ea151 1.2-stable

* merge from trunk (1.2 ended up being tracked from trunk, but we may want this branch for fixes later)
author Chris Cannam
date Wed, 27 Feb 2008 10:32:45 +0000
parents a8acc7841d70
children
files audioio/AudioCallbackPlaySource.cpp audioio/AudioCallbackPlaySource.h audioio/AudioCallbackPlayTarget.h audioio/AudioGenerator.cpp audioio/AudioJACKTarget.cpp audioio/AudioJACKTarget.h audioio/AudioPortAudioTarget.cpp audioio/AudioPortAudioTarget.h audioio/PhaseVocoderTimeStretcher.cpp audioio/PhaseVocoderTimeStretcher.h audioio/audioio.pro framework/Document.cpp framework/Document.h framework/MainWindowBase.cpp framework/MainWindowBase.h framework/SVFileReader.cpp framework/SVFileReader.h
diffstat 17 files changed, 1164 insertions(+), 1347 deletions(-)
--- a/audioio/AudioCallbackPlaySource.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioCallbackPlaySource.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -26,12 +26,10 @@
 #include "data/model/SparseOneDimensionalModel.h"
 #include "plugin/RealTimePluginInstance.h"
 
-#ifdef HAVE_RUBBERBAND
+#include "AudioCallbackPlayTarget.h"
+
 #include <rubberband/RubberBandStretcher.h>
 using namespace RubberBand;
-#else
-#include "PhaseVocoderTimeStretcher.h"
-#endif
 
 #include <iostream>
 #include <cassert>
@@ -56,6 +54,9 @@
     m_sourceSampleRate(0),
     m_targetSampleRate(0),
     m_playLatency(0),
+    m_target(0),
+    m_lastRetrievalTimestamp(0.0),
+    m_lastRetrievedBlockSize(0),
     m_playing(false),
     m_exiting(false),
     m_lastModelEndFrame(0),
@@ -63,7 +64,13 @@
     m_outputRight(0.0),
     m_auditioningPlugin(0),
     m_auditioningPluginBypassed(false),
+    m_playStartFrame(0),
+    m_playStartFramePassed(false),
     m_timeStretcher(0),
+    m_stretchRatio(1.0),
+    m_stretcherInputCount(0),
+    m_stretcherInputs(0),
+    m_stretcherInputSizes(0),
     m_fillThread(0),
     m_converter(0),
     m_crapConverter(0),
@@ -107,11 +114,14 @@
 
     delete m_audioGenerator;
 
+    for (size_t i = 0; i < m_stretcherInputCount; ++i) {
+        delete[] m_stretcherInputs[i];
+    }
+    delete[] m_stretcherInputSizes;
+    delete[] m_stretcherInputs;
+
     m_bufferScavenger.scavenge(true);
     m_pluginScavenger.scavenge(true);
-#ifndef HAVE_RUBBERBAND
-    m_timeStretcherScavenger.scavenge(true);
-#endif
 }
 
 void
@@ -236,7 +246,10 @@
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
     std::cerr << "AudioCallbackPlaySource::modelChanged(" << startFrame << "," << endFrame << ")" << std::endl;
 #endif
-    if (endFrame > m_lastModelEndFrame) m_lastModelEndFrame = endFrame;
+    if (endFrame > m_lastModelEndFrame) {
+        m_lastModelEndFrame = endFrame;
+        rebuildRangeLists();
+    }
 }
 
 void
@@ -304,6 +317,8 @@
     m_mutex.unlock();
 
     m_audioGenerator->clearModels();
+
+    clearRingBuffers();
 }    
 
 void
@@ -311,22 +326,13 @@
 {
     if (!haveLock) m_mutex.lock();
 
+    rebuildRangeLists();
+
     if (count == 0) {
 	if (m_writeBuffers) count = m_writeBuffers->size();
     }
 
-    size_t sf = m_readBufferFill;
-    RingBuffer<float> *rb = getReadRingBuffer(0);
-    if (rb) {
-	//!!! This is incorrect if we're in a non-contiguous selection
-	//Same goes for all related code (subtracting the read space
-	//from the fill frame to try to establish where the effective
-	//pre-resample/timestretch read pointer is)
-	size_t rs = rb->getReadSpace();
-	if (rs < sf) sf -= rs;
-	else sf = 0;
-    }
-    m_writeBufferFill = sf;
+    m_writeBufferFill = getCurrentBufferedFrame();
 
     if (m_readBuffers != m_writeBuffers) {
 	delete m_writeBuffers;
@@ -352,8 +358,12 @@
     if (m_viewManager->getPlaySelectionMode() &&
 	!m_viewManager->getSelections().empty()) {
 
+        std::cerr << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = ";
+
         startFrame = m_viewManager->constrainFrameToSelection(startFrame);
 
+        std::cerr << startFrame << std::endl;
+
     } else {
 	if (startFrame >= m_lastModelEndFrame) {
 	    startFrame = 0;
@@ -371,11 +381,16 @@
     // we're just re-seeking.
 
     m_mutex.lock();
+    if (m_timeStretcher) {
+        m_timeStretcher->reset();
+    }
     if (m_playing) {
+        std::cerr << "playing already, resetting" << std::endl;
 	m_readBufferFill = m_writeBufferFill = startFrame;
 	if (m_readBuffers) {
 	    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
 		RingBuffer<float> *rb = getReadRingBuffer(c);
+                std::cerr << "reset ring buffer for channel " << c << std::endl;
 		if (rb) rb->reset();
 	    }
 	}
@@ -390,7 +405,15 @@
 
     m_audioGenerator->reset();
 
+    m_playStartFrame = startFrame;
+    m_playStartFramePassed = false;
+    m_playStartedAt = RealTime::zeroTime;
+    if (m_target) {
+        m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime());
+    }
+
     bool changed = !m_playing;
+    m_lastRetrievalTimestamp = 0;
     m_playing = true;
     m_condition.wakeAll();
     if (changed) emit playStatusChanged(m_playing);
@@ -402,6 +425,7 @@
     bool changed = m_playing;
     m_playing = false;
     m_condition.wakeAll();
+    m_lastRetrievalTimestamp = 0;
     if (changed) emit playStatusChanged(m_playing);
 }
 
@@ -452,8 +476,9 @@
 }
 
 void
-AudioCallbackPlaySource::setTargetBlockSize(size_t size)
+AudioCallbackPlaySource::setTarget(AudioCallbackPlayTarget *target, size_t size)
 {
+    m_target = target;
 //    std::cout << "AudioCallbackPlaySource::setTargetBlockSize() -> " << size << std::endl;
     assert(size < m_ringBufferSize);
     m_blockSize = size;
@@ -481,134 +506,273 @@
 size_t
 AudioCallbackPlaySource::getCurrentPlayingFrame()
 {
+    // This method attempts to estimate which audio sample frame is
+    // "currently coming through the speakers".
+
+    size_t targetRate = getTargetSampleRate();
+    size_t latency = m_playLatency; // at target rate
+    RealTime latency_t = RealTime::frame2RealTime(latency, targetRate);
+
+    return getCurrentFrame(latency_t);
+}
+
+size_t
+AudioCallbackPlaySource::getCurrentBufferedFrame()
+{
+    return getCurrentFrame(RealTime::zeroTime);
+}
+
+size_t
+AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t)
+{
     bool resample = false;
-    double ratio = 1.0;
+    double resampleRatio = 1.0;
 
-    if (getSourceSampleRate() != getTargetSampleRate()) {
-	resample = true;
-	ratio = double(getSourceSampleRate()) / double(getTargetSampleRate());
-    }
+    // We resample when filling the ring buffer, and time-stretch when
+    // draining it.  The buffer contains data at the "target rate" and
+    // the latency provided by the target is also at the target rate.
+    // Because of the multiple rates involved, we do the actual
+    // calculation using RealTime instead.
 
-    size_t readSpace = 0;
+    size_t sourceRate = getSourceSampleRate();
+    size_t targetRate = getTargetSampleRate();
+
+    if (sourceRate == 0 || targetRate == 0) return 0;
+
+    size_t inbuffer = 0; // at target rate
+
     for (size_t c = 0; c < getTargetChannelCount(); ++c) {
 	RingBuffer<float> *rb = getReadRingBuffer(c);
 	if (rb) {
-	    size_t spaceHere = rb->getReadSpace();
-	    if (c == 0 || spaceHere < readSpace) readSpace = spaceHere;
+	    size_t here = rb->getReadSpace();
+	    if (c == 0 || here < inbuffer) inbuffer = here;
 	}
     }
 
-    if (resample) {
-	readSpace = size_t(readSpace * ratio + 0.1);
+    size_t readBufferFill = m_readBufferFill;
+    size_t lastRetrievedBlockSize = m_lastRetrievedBlockSize;
+    double lastRetrievalTimestamp = m_lastRetrievalTimestamp;
+    double currentTime = 0.0;
+    if (m_target) currentTime = m_target->getCurrentTime();
+
+    RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate);
+
+    size_t stretchlat = 0;
+    double timeRatio = 1.0;
+
+    if (m_timeStretcher) {
+        stretchlat = m_timeStretcher->getLatency();
+        timeRatio = m_timeStretcher->getTimeRatio();
     }
 
-    size_t latency = m_playLatency;
-    if (resample) latency = size_t(m_playLatency * ratio + 0.1);
+    RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, targetRate);
 
-#ifdef HAVE_RUBBERBAND
-    if (m_timeStretcher) {
-        latency += m_timeStretcher->getLatency();
+    // When the target has just requested a block from us, the last
+    // sample it obtained was our buffer fill frame count minus the
+    // amount of read space (converted back to source sample rate)
+    // remaining now.  That sample is not expected to be played until
+    // the target's play latency has elapsed.  By the time the
+    // following block is requested, that sample will be at the
+    // target's play latency minus the last requested block size away
+    // from being played.
+
+    RealTime sincerequest_t = RealTime::zeroTime;
+    RealTime lastretrieved_t = RealTime::zeroTime;
+
+    if (m_target && lastRetrievalTimestamp != 0.0) {
+
+        lastretrieved_t = RealTime::frame2RealTime
+            (lastRetrievedBlockSize, targetRate);
+
+        // calculate number of frames at target rate that have elapsed
+        // since the end of the last call to getSourceSamples
+
+        double elapsed = currentTime - lastRetrievalTimestamp;
+
+        if (elapsed > 0.0) {
+            sincerequest_t = RealTime::fromSeconds(elapsed);
+        }
+
+    } else {
+
+        lastretrieved_t = RealTime::frame2RealTime
+            (getTargetBlockSize(), targetRate);
     }
-#else
-    PhaseVocoderTimeStretcher *timeStretcher = m_timeStretcher;
-    if (timeStretcher) {
-	latency += timeStretcher->getProcessingLatency();
+
+    RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, sourceRate);
+
+    if (timeRatio != 1.0) {
+        lastretrieved_t = lastretrieved_t / timeRatio;
+        sincerequest_t = sincerequest_t / timeRatio;
     }
+
+    bool looping = m_viewManager->getPlayLoopMode();
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    std::cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n  stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n  since request: " << sincerequest_t << ", last retrieved: " << lastretrieved_t << std::endl;
 #endif
 
-    latency += readSpace;
-    size_t bufferedFrame = m_readBufferFill;
+    RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate);
 
-    bool looping = m_viewManager->getPlayLoopMode();
-    bool constrained = (m_viewManager->getPlaySelectionMode() &&
-			!m_viewManager->getSelections().empty());
+    // Normally the range lists should contain at least one item each
+    // -- if playback is unconstrained, that item should report the
+    // entire source audio duration.
 
-    size_t framePlaying = bufferedFrame;
-
-    if (looping && !constrained) {
-	while (framePlaying < latency) framePlaying += m_lastModelEndFrame;
+    if (m_rangeStarts.empty()) {
+        rebuildRangeLists();
     }
 
-    if (framePlaying > latency) framePlaying -= latency;
-    else framePlaying = 0;
+    if (m_rangeStarts.empty()) {
+        // this code is only used in case of error in rebuildRangeLists
+        RealTime playing_t = bufferedto_t
+            - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
+            + sincerequest_t;
+        size_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
+        return m_viewManager->alignPlaybackFrameToReference(frame);
+    }
 
-//    std::cerr << "framePlaying = " << framePlaying << " -> reference ";
+    int inRange = 0;
+    int index = 0;
 
-    framePlaying = m_viewManager->alignPlaybackFrameToReference(framePlaying);
+    for (size_t i = 0; i < m_rangeStarts.size(); ++i) {
+        if (bufferedto_t >= m_rangeStarts[i]) {
+            inRange = index;
+        } else {
+            break;
+        }
+        ++index;
+    }
 
-//    std::cerr << framePlaying << std::endl;
+    if (inRange >= m_rangeStarts.size()) inRange = m_rangeStarts.size()-1;
+
+    RealTime playing_t = bufferedto_t;
+
+    playing_t = playing_t
+        - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
+        + sincerequest_t;
+
+    // This rather gross little hack is used to ensure that latency
+    // compensation doesn't result in the playback pointer appearing
+    // to start earlier than the actual playback does.  It doesn't
+    // work properly (hence the bail-out in the middle) because if we
+    // are playing a relatively short looped region, the playing time
+    // estimated from the buffer fill frame may have wrapped around
+    // the region boundary and end up being much smaller than the
+    // theoretical play start frame, perhaps even for the entire
+    // duration of playback!
+
+    if (!m_playStartFramePassed) {
+        RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame,
+                                                        sourceRate);
+        if (playing_t < playstart_t) {
+//            std::cerr << "playing_t " << playing_t << " < playstart_t " 
+//                      << playstart_t << std::endl;
+            if (sincerequest_t > RealTime::zeroTime &&
+                m_playStartedAt + latency_t + stretchlat_t <
+                RealTime::fromSeconds(currentTime)) {
+//                std::cerr << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << std::endl;
+                m_playStartFramePassed = true;
+            } else {
+                playing_t = playstart_t;
+            }
+        } else {
+            m_playStartFramePassed = true;
+        }
+    }
+
+    playing_t = playing_t - m_rangeStarts[inRange];
+ 
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    std::cerr << "playing_t as offset into range " << inRange << " (with start = " << m_rangeStarts[inRange] << ") = " << playing_t << std::endl;
+#endif
+
+    while (playing_t < RealTime::zeroTime) {
+
+        if (inRange == 0) {
+            if (looping) {
+                inRange = m_rangeStarts.size() - 1;
+            } else {
+                break;
+            }
+        } else {
+            --inRange;
+        }
+
+        playing_t = playing_t + m_rangeDurations[inRange];
+    }
+
+    playing_t = playing_t + m_rangeStarts[inRange];
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+    std::cerr << "  playing time: " << playing_t << std::endl;
+#endif
+
+    if (!looping) {
+        if (inRange == m_rangeStarts.size()-1 &&
+            playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) {
+std::cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << std::endl;
+            stop();
+        }
+    }
+
+    if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime;
+
+    size_t frame = RealTime::realTime2Frame(playing_t, sourceRate);
+    return m_viewManager->alignPlaybackFrameToReference(frame);
+}
+
+void
+AudioCallbackPlaySource::rebuildRangeLists()
+{
+    bool constrained = (m_viewManager->getPlaySelectionMode());
+
+    m_rangeStarts.clear();
+    m_rangeDurations.clear();
+
+    size_t sourceRate = getSourceSampleRate();
+    if (sourceRate == 0) return;
+
+    RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate);
+    if (end == RealTime::zeroTime) return;
 
     if (!constrained) {
-	if (!looping && framePlaying > m_lastModelEndFrame) {
-	    framePlaying = m_lastModelEndFrame;
-	    stop();
-	}
-	return framePlaying;
+        m_rangeStarts.push_back(RealTime::zeroTime);
+        m_rangeDurations.push_back(end);
+        return;
     }
 
-    bufferedFrame = m_viewManager->alignPlaybackFrameToReference(bufferedFrame);
-
     MultiSelection::SelectionList selections = m_viewManager->getSelections();
     MultiSelection::SelectionList::const_iterator i;
 
-//    i = selections.begin();
-//    size_t rangeStart = i->getStartFrame();
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cerr << "AudioCallbackPlaySource::rebuildRangeLists" << std::endl;
+#endif
 
-    i = selections.end();
-    --i;
-    size_t rangeEnd = i->getEndFrame();
+    if (!selections.empty()) {
 
-    for (i = selections.begin(); i != selections.end(); ++i) {
-	if (i->contains(bufferedFrame)) break;
+        for (i = selections.begin(); i != selections.end(); ++i) {
+            
+            RealTime start =
+                (RealTime::frame2RealTime
+                 (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
+                  sourceRate));
+            RealTime duration = 
+                (RealTime::frame2RealTime
+                 (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) -
+                  m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()),
+                  sourceRate));
+            
+            m_rangeStarts.push_back(start);
+            m_rangeDurations.push_back(duration);
+        }
+    } else {
+        m_rangeStarts.push_back(RealTime::zeroTime);
+        m_rangeDurations.push_back(end);
     }
 
-    size_t f = bufferedFrame;
-
-//    std::cout << "getCurrentPlayingFrame: f=" << f << ", latency=" << latency << ", rangeEnd=" << rangeEnd << std::endl;
-
-    if (i == selections.end()) {
-	--i;
-	if (i->getEndFrame() + latency < f) {
-//    std::cout << "framePlaying = " << framePlaying << ", rangeEnd = " << rangeEnd << std::endl;
-
-	    if (!looping && (framePlaying > rangeEnd)) {
-//		std::cout << "STOPPING" << std::endl;
-		stop();
-		return rangeEnd;
-	    } else {
-		return framePlaying;
-	    }
-	} else {
-//	    std::cout << "latency <- " << latency << "-(" << f << "-" << i->getEndFrame() << ")" << std::endl;
-	    latency -= (f - i->getEndFrame());
-	    f = i->getEndFrame();
-	}
-    }
-
-//    std::cout << "i=(" << i->getStartFrame() << "," << i->getEndFrame() << ") f=" << f << ", latency=" << latency << std::endl;
-
-    while (latency > 0) {
-	size_t offset = f - i->getStartFrame();
-	if (offset >= latency) {
-	    if (f > latency) {
-		framePlaying = f - latency;
-	    } else {
-		framePlaying = 0;
-	    }
-	    break;
-	} else {
-	    if (i == selections.begin()) {
-		if (looping) {
-		    i = selections.end();
-		}
-	    }
-	    latency -= offset;
-	    --i;
-	    f = i->getEndFrame();
-	}
-    }
-
-    return framePlaying;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+    std::cerr << "Now have " << m_rangeStarts.size() << " play ranges" << std::endl;
+#endif
 }
 
 void
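
The position estimate above boils down to one RealTime expression. A minimal sketch restating it outside the class, with parameter names mirroring the locals above (illustration only, not the method itself):

    #include "base/RealTime.h"

    // Everything still queued between the ring-buffer fill point and the
    // speakers is subtracted; the wall-clock time already elapsed since
    // the last block request is added back in.
    static RealTime estimatePlayingTime(RealTime bufferedto_t,    // ring-buffer fill point
                                        RealTime inbuffer_t,      // audio still in the ring buffer
                                        RealTime lastretrieved_t, // last block handed to the target
                                        RealTime sincerequest_t,  // elapsed since that request
                                        RealTime latency_t,       // device output latency
                                        RealTime stretchlat_t)    // time-stretcher latency
    {
        return bufferedto_t
            - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t
            + sincerequest_t;
    }

The result is then folded into the current selection range and converted back to a source-rate frame, as the code above goes on to do.
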
@@ -758,65 +922,29 @@
 }
 
 void
-AudioCallbackPlaySource::setTimeStretch(float factor, bool sharpen, bool mono)
+AudioCallbackPlaySource::setTimeStretch(float factor)
 {
-#ifdef HAVE_RUBBERBAND
-    if (m_timeStretcher) {
-        m_timeStretchRatioMutex.lock();
-        m_timeStretcher->setTimeRatio(factor);
-        m_timeStretchRatioMutex.unlock();
+    m_stretchRatio = factor;
+
+    if (m_timeStretcher || (factor == 1.f)) {
+        // stretch ratio will be set in next process call if appropriate
         return;
     } else {
+        m_stretcherInputCount = getTargetChannelCount();
         RubberBandStretcher *stretcher = new RubberBandStretcher
             (getTargetSampleRate(),
-             getTargetChannelCount(),
+             m_stretcherInputCount,
              RubberBandStretcher::OptionProcessRealTime,
              factor);
+        m_stretcherInputs = new float *[m_stretcherInputCount];
+        m_stretcherInputSizes = new size_t[m_stretcherInputCount];
+        for (size_t c = 0; c < m_stretcherInputCount; ++c) {
+            m_stretcherInputSizes[c] = 16384;
+            m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
+        }
         m_timeStretcher = stretcher;
         return;
     }
-#else
-    // Avoid locks -- create, assign, mark old one for scavenging
-    // later (as a call to getSourceSamples may still be using it)
-
-    PhaseVocoderTimeStretcher *existingStretcher = m_timeStretcher;
-
-    size_t channels = getTargetChannelCount();
-    if (mono) channels = 1;
-
-    if (existingStretcher &&
-        existingStretcher->getRatio() == factor &&
-        existingStretcher->getSharpening() == sharpen &&
-        existingStretcher->getChannelCount() == channels) {
-	return;
-    }
-
-    if (factor != 1) {
-
-        if (existingStretcher &&
-            existingStretcher->getSharpening() == sharpen &&
-            existingStretcher->getChannelCount() == channels) {
-            existingStretcher->setRatio(factor);
-            return;
-        }
-
-	PhaseVocoderTimeStretcher *newStretcher = new PhaseVocoderTimeStretcher
-	    (getTargetSampleRate(),
-             channels,
-             factor,
-             sharpen,
-             getTargetBlockSize());
-
-	m_timeStretcher = newStretcher;
-
-    } else {
-	m_timeStretcher = 0;
-    }
-
-    if (existingStretcher) {
-	m_timeStretcherScavenger.claim(existingStretcher);
-    }
-#endif
 }
 
 size_t
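
The new setTimeStretch() relies on RubberBand's real-time mode: one stretcher is created lazily for the current channel count, and later ratio changes become deferred setTimeRatio() calls. A condensed sketch of that pattern (sample rate, channel count and factor are placeholders):

    #include <rubberband/RubberBandStretcher.h>

    using RubberBand::RubberBandStretcher;

    // Create a stretcher suitable for callback-driven use; the ratio can
    // be changed later with setTimeRatio() and takes effect on subsequent
    // process() calls (as getSourceSamples() does above).
    RubberBandStretcher *makeRealTimeStretcher(size_t sampleRate,
                                               size_t channels,
                                               float factor)
    {
        return new RubberBandStretcher
            (sampleRate, channels,
             RubberBandStretcher::OptionProcessRealTime,
             factor);
    }

A later call with a different factor just records the ratio; getSourceSamples() applies it before processing, as the following hunks show.
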
@@ -860,13 +988,22 @@
 
     if (count == 0) return 0;
 
-#ifdef HAVE_RUBBERBAND
     RubberBandStretcher *ts = m_timeStretcher;
     float ratio = ts ? ts->getTimeRatio() : 1.f;
-#else
-    PhaseVocoderTimeStretcher *ts = m_timeStretcher;
-    float ratio = ts ? ts->getRatio() : 1.f;
-#endif
+
+    if (ratio != m_stretchRatio) {
+        if (!ts) {
+            std::cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << std::endl;
+            m_stretchRatio = 1.f;
+        } else {
+            ts->setTimeRatio(m_stretchRatio);
+        }
+    }
+
+    if (m_target) {
+        m_lastRetrievedBlockSize = count;
+        m_lastRetrievalTimestamp = m_target->getCurrentTime();
+    }
 
     if (!ts || ratio == 1.f) {
 
@@ -900,68 +1037,58 @@
         applyAuditioningEffect(count, buffer);
 
         m_condition.wakeAll();
+
 	return got;
     }
 
     size_t channels = getTargetChannelCount();
+    size_t available;
+    int warned = 0;
+    size_t fedToStretcher = 0;
 
-#ifdef HAVE_RUBBERBAND
-    bool mix = false;
-#else
-    bool mix = (channels > 1 && ts->getChannelCount() == 1);
+    // The input block for a given output is approx output / ratio,
+    // but we can't predict it exactly, for an adaptive timestretcher.
+
+    while ((available = ts->available()) < count) {
+
+        size_t reqd = lrintf((count - available) / ratio);
+        reqd = std::max(reqd, ts->getSamplesRequired());
+        if (reqd == 0) reqd = 1;
+                
+        size_t got = reqd;
+
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+        std::cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << std::endl;
 #endif
 
-    size_t available;
+        for (size_t c = 0; c < channels; ++c) {
+            if (c >= m_stretcherInputCount) continue;
+            if (reqd > m_stretcherInputSizes[c]) {
+                if (c == 0) {
+                    std::cerr << "WARNING: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << std::endl;
+                }
+                delete[] m_stretcherInputs[c];
+                m_stretcherInputSizes[c] = reqd * 2;
+                m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
+            }
+        }
 
-    int warned = 0;
-
-    // We want output blocks of e.g. 1024 (probably fixed, certainly
-    // bounded).  We can provide input blocks of any size (unbounded)
-    // at the timestretcher's request.  The input block for a given
-    // output is approx output / ratio, but we can't predict it
-    // exactly, for an adaptive timestretcher.  The stretcher will
-    // need some additional buffer space.  See the time stretcher code
-    // and comments.
-
-#ifdef HAVE_RUBBERBAND
-    m_timeStretchRatioMutex.lock();
-    while ((available = ts->available()) < count) {
-#else
-    while ((available = ts->getAvailableOutputSamples()) < count) {
+        for (size_t c = 0; c < channels; ++c) {
+            if (c >= m_stretcherInputCount) continue;
+            RingBuffer<float> *rb = getReadRingBuffer(c);
+            if (rb) {
+                size_t gotHere = rb->read(m_stretcherInputs[c], got);
+                if (gotHere < got) got = gotHere;
+                
+#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
+                if (c == 0) {
+                    std::cerr << "feeding stretcher: got " << gotHere
+                              << ", " << rb->getReadSpace() << " remain" << std::endl;
+                }
 #endif
-
-        size_t reqd = lrintf((count - available) / ratio);
-#ifdef HAVE_RUBBERBAND
-        reqd = std::max(reqd, ts->getSamplesRequired());
-#else
-        reqd = std::max(reqd, ts->getRequiredInputSamples());
-#endif
-        if (reqd == 0) reqd = 1;
                 
-        float *ib[channels];
-
-        size_t got = reqd;
-
-        if (mix) {
-            for (size_t c = 0; c < channels; ++c) {
-                if (c == 0) ib[c] = new float[reqd]; //!!! fix -- this is a rt function
-                else ib[c] = 0;
-                RingBuffer<float> *rb = getReadRingBuffer(c);
-                if (rb) {
-                    size_t gotHere;
-                    if (c > 0) gotHere = rb->readAdding(ib[0], got);
-                    else gotHere = rb->read(ib[0], got);
-                    if (gotHere < got) got = gotHere;
-                }
-            }
-        } else {
-            for (size_t c = 0; c < channels; ++c) {
-                ib[c] = new float[reqd]; //!!! fix -- this is a rt function
-                RingBuffer<float> *rb = getReadRingBuffer(c);
-                if (rb) {
-                    size_t gotHere = rb->read(ib[c], got);
-                    if (gotHere < got) got = gotHere;
-                }
+            } else {
+                std::cerr << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << std::endl;
             }
         }
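
The loop above follows RubberBand's standard real-time pull pattern: keep feeding source blocks until enough stretched output is available, then retrieve exactly the requested count. A condensed sketch under that pattern (the feed callback is hypothetical and stands in for the per-channel ring-buffer reads and buffer resizing above):

    #include <rubberband/RubberBandStretcher.h>
    #include <algorithm>
    #include <cmath>

    size_t pullStretched(RubberBand::RubberBandStretcher *ts,
                         float **inputs,   // scratch input buffers, one per channel
                         float **output,   // destination buffers, one per channel
                         size_t count, double ratio,
                         size_t (*feed)(float **inputs, size_t wanted)) // hypothetical supplier
    {
        while (ts->available() < (int)count) {
            // Input needed is roughly output / ratio, but the stretcher may
            // ask for more; never request zero or we make no progress.
            size_t reqd = std::max<size_t>
                (size_t(std::lrint((count - ts->available()) / ratio)),
                 ts->getSamplesRequired());
            if (reqd == 0) reqd = 1;
            size_t got = feed(inputs, reqd);
            ts->process(inputs, got, false);   // false: more input to come
            if (got == 0) break;               // source underrun
        }
        return ts->retrieve(output, count);
    }
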
 
@@ -969,46 +1096,20 @@
             std::cerr << "WARNING: Read underrun in playback ("
                       << got << " < " << reqd << ")" << std::endl;
         }
-                
-#ifdef HAVE_RUBBERBAND
-        ts->process(ib, got, false);
-#else
-        ts->putInput(ib, got);
-#endif
 
-        for (size_t c = 0; c < channels; ++c) {
-            delete[] ib[c];
-        }
+        ts->process(m_stretcherInputs, got, false);
+
+        fedToStretcher += got;
 
         if (got == 0) break;
 
-#ifdef HAVE_RUBBERBAND
         if (ts->available() == available) {
-#else
-        if (ts->getAvailableOutputSamples() == available) {
-#endif
             std::cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << std::endl;
             if (++warned == 5) break;
         }
     }
 
-#ifdef HAVE_RUBBERBAND
     ts->retrieve(buffer, count);
-    m_timeStretchRatioMutex.unlock();
-#else
-    ts->getOutput(buffer, count);
-#endif
-
-    if (mix) {
-        for (size_t c = 1; c < channels; ++c) {
-            for (size_t i = 0; i < count; ++i) {
-                buffer[c][i] = buffer[0][i] / channels;
-            }
-        }
-        for (size_t i = 0; i < count; ++i) {
-            buffer[0][i] /= channels;
-        }
-    }
 
     applyAuditioningEffect(count, buffer);
 
@@ -1184,11 +1285,7 @@
 	
 	int err = 0;
 
-#ifdef HAVE_RUBBERBAND
         if (m_timeStretcher && m_timeStretcher->getTimeRatio() < 0.4) {
-#else
-        if (m_timeStretcher && m_timeStretcher->getRatio() < 0.4) {
-#endif
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
             std::cout << "Using crappy converter" << std::endl;
 #endif
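
The condition above switches to the lower-quality libsamplerate converter when the time ratio drops below 0.4, because very fast playback pulls far more resampled source data per callback. A hedged sketch of that selection, assuming two converters created elsewhere with src_new() (as m_converter and m_crapConverter presumably are):

    #include <samplerate.h>

    // 'best' would be a sinc converter, 'cheap' something like
    // SRC_LINEAR; both handles are assumed to exist already.
    SRC_STATE *chooseConverter(SRC_STATE *best, SRC_STATE *cheap,
                               double timeRatio)
    {
        return (timeRatio < 0.4) ? cheap : best;
    }
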
@@ -1227,7 +1324,13 @@
 
 	// space must be a multiple of generatorBlockSize
 	space = (space / generatorBlockSize) * generatorBlockSize;
-	if (space == 0) return false;
+	if (space == 0) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+            std::cout << "requested fill is less than generator block size of "
+                      << generatorBlockSize << ", leaving it" << std::endl;
+#endif
+            return false;
+        }
 
 	if (tmpSize < channels * space) {
 	    delete[] tmp;
@@ -1513,9 +1616,6 @@
 	s.unifyRingBuffers();
 	s.m_bufferScavenger.scavenge();
         s.m_pluginScavenger.scavenge();
-#ifndef HAVE_RUBBERBAND
-	s.m_timeStretcherScavenger.scavenge();
-#endif
 
 	if (work && s.m_playing && s.getSourceSampleRate()) {
 	    
--- a/audioio/AudioCallbackPlaySource.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioCallbackPlaySource.h	Wed Feb 27 10:32:45 2008 +0000
@@ -26,23 +26,23 @@
 #include <QWaitCondition>
 
 #include "base/Thread.h"
+#include "base/RealTime.h"
 
 #include <samplerate.h>
 
 #include <set>
 #include <map>
 
-#ifdef HAVE_RUBBERBAND
-#include <rubberband/RubberBandStretcher.h>
-#else
-class PhaseVocoderTimeStretcher;
-#endif
+namespace RubberBand {
+    class RubberBandStretcher;
+}
 
 class Model;
 class ViewManager;
 class AudioGenerator;
 class PlayParameters;
 class RealTimePluginInstance;
+class AudioCallbackPlayTarget;
 
 /**
  * AudioCallbackPlaySource manages audio data supply to callback-based
@@ -100,6 +100,12 @@
      * out of the speakers.  (i.e. compensating for playback latency.)
      */
     virtual size_t getCurrentPlayingFrame();
+    
+    /** 
+     * Return the last frame that would come out of the speakers if we
+     * stopped playback right now.
+     */
+    virtual size_t getCurrentBufferedFrame();
 
     /**
      * Return the frame at which playback is expected to end (if not looping).
@@ -107,13 +113,16 @@
     virtual size_t getPlayEndFrame() { return m_lastModelEndFrame; }
 
     /**
-     * Set the block size of the target audio device.  This should
-     * be called by the target class.
+     * Set the target and the block size of the target audio device.
+     * This should be called by the target class.
      */
-    void setTargetBlockSize(size_t);
+    void setTarget(AudioCallbackPlayTarget *, size_t blockSize);
 
     /**
-     * Get the block size of the target audio device.
+     * Get the block size of the target audio device.  This may be an
+     * estimate or upper bound, if the target has a variable block
+     * size; the source should behave itself even if this value turns
+     * out to be inaccurate.
      */
     size_t getTargetBlockSize() const;
 
@@ -190,12 +199,9 @@
     size_t getSourceSamples(size_t count, float **buffer);
 
     /**
-     * Set the time stretcher factor (i.e. playback speed).  Also
-     * specify whether the time stretcher will be variable rate
-     * (sharpening transients), and whether time stretching will be
-     * carried out on data mixed down to mono for speed.
+     * Set the time stretcher factor (i.e. playback speed).
      */
-    void setTimeStretch(float factor, bool sharpen, bool mono);
+    void setTimeStretch(float factor);
 
     /**
      * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is
@@ -279,6 +285,9 @@
     size_t                            m_sourceSampleRate;
     size_t                            m_targetSampleRate;
     size_t                            m_playLatency;
+    AudioCallbackPlayTarget          *m_target;
+    double                            m_lastRetrievalTimestamp;
+    size_t                            m_lastRetrievedBlockSize;
     bool                              m_playing;
     bool                              m_exiting;
     size_t                            m_lastModelEndFrame;
@@ -288,6 +297,9 @@
     RealTimePluginInstance           *m_auditioningPlugin;
     bool                              m_auditioningPluginBypassed;
     Scavenger<RealTimePluginInstance> m_pluginScavenger;
+    size_t                            m_playStartFrame;
+    bool                              m_playStartFramePassed;
+    RealTime                          m_playStartedAt;
 
     RingBuffer<float> *getWriteRingBuffer(size_t c) {
 	if (m_writeBuffers && c < m_writeBuffers->size()) {
@@ -309,13 +321,12 @@
     void clearRingBuffers(bool haveLock = false, size_t count = 0);
     void unifyRingBuffers();
 
-#ifdef HAVE_RUBBERBAND
     RubberBand::RubberBandStretcher *m_timeStretcher;
-    QMutex m_timeStretchRatioMutex;
-#else
-    PhaseVocoderTimeStretcher *m_timeStretcher;
-    Scavenger<PhaseVocoderTimeStretcher> m_timeStretcherScavenger;
-#endif
+    float m_stretchRatio;
+    
+    size_t  m_stretcherInputCount;
+    float **m_stretcherInputs;
+    size_t *m_stretcherInputSizes;
 
     // Called from fill thread, m_playing true, mutex held
     // Return true if work done
@@ -330,6 +341,13 @@
     // Called from getSourceSamples.
     void applyAuditioningEffect(size_t count, float **buffers);
 
+    // Ranges of current selections, if play selection is active
+    std::vector<RealTime> m_rangeStarts;
+    std::vector<RealTime> m_rangeDurations;
+    void rebuildRangeLists();
+
+    size_t getCurrentFrame(RealTime outputLatency);
+
     class FillThread : public Thread
     {
     public:
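
The distinction between the two frame queries documented above matters to callers: getCurrentPlayingFrame() is latency-compensated and suits the on-screen playback cursor, while getCurrentBufferedFrame() is what buffer-rebuilding code uses to decide where to resume. A small usage sketch (the helper is hypothetical):

    #include "AudioCallbackPlaySource.h"

    // Latency-compensated position for the cursor; uncompensated position
    // for deciding where ring-buffer filling should resume.
    void updatePositions(AudioCallbackPlaySource *playSource,
                         size_t &cursorFrame, size_t &resumeFrame)
    {
        cursorFrame = playSource->getCurrentPlayingFrame();
        resumeFrame = playSource->getCurrentBufferedFrame();
    }
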
--- a/audioio/AudioCallbackPlayTarget.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioCallbackPlayTarget.h	Wed Feb 27 10:32:45 2008 +0000
@@ -32,6 +32,8 @@
 
     virtual void shutdown() = 0;
 
+    virtual double getCurrentTime() const = 0;
+
     float getOutputGain() const {
 	return m_outputGain;
     }
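
With the pure virtual added above, every concrete target supplies a clock in seconds and registers itself with the source. A minimal sketch of that handshake (the attach helper is hypothetical; both real targets in this changeset do the equivalent inline):

    #include "AudioCallbackPlaySource.h"
    #include "AudioCallbackPlayTarget.h"

    void attach(AudioCallbackPlaySource *source,
                AudioCallbackPlayTarget *target, size_t blockSize)
    {
        // Replaces the old setTargetBlockSize() handshake.  From here on
        // the source can call target->getCurrentTime() to timestamp each
        // block request; targets pass a null target again on teardown.
        source->setTarget(target, blockSize);
    }
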
--- a/audioio/AudioGenerator.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioGenerator.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -434,23 +434,60 @@
 				       float **buffer, float gain, float pan,
 				       size_t fadeIn, size_t fadeOut)
 {
-    static float *channelBuffer = 0;
-    static size_t channelBufSiz = 0;
+    static float **channelBuffer = 0;
+    static size_t  channelBufSiz = 0;
+    static size_t  channelBufCount = 0;
 
     size_t totalFrames = frames + fadeIn/2 + fadeOut/2;
 
-    if (channelBufSiz < totalFrames) {
+    size_t modelChannels = dtvm->getChannelCount();
+
+    if (channelBufSiz < totalFrames || channelBufCount < modelChannels) {
+
+        for (size_t c = 0; c < channelBufCount; ++c) {
+            delete[] channelBuffer[c];
+        }
+
 	delete[] channelBuffer;
-	channelBuffer = new float[totalFrames];
+        channelBuffer = new float *[modelChannels];
+
+        for (size_t c = 0; c < modelChannels; ++c) {
+            channelBuffer[c] = new float[totalFrames];
+        }
+
+        channelBufCount = modelChannels;
 	channelBufSiz = totalFrames;
     }
-    
+
     size_t got = 0;
-    size_t prevChannel = 999;
+
+    if (startFrame >= fadeIn/2) {
+        got = dtvm->getData(0, modelChannels - 1,
+                            startFrame - fadeIn/2,
+                            frames + fadeOut/2 + fadeIn/2,
+                            channelBuffer);
+    } else {
+        size_t missing = fadeIn/2 - startFrame;
+
+        for (size_t c = 0; c < modelChannels; ++c) {
+            channelBuffer[c] += missing;
+        }
+
+        got = dtvm->getData(0, modelChannels - 1,
+                            startFrame,
+                            frames + fadeOut/2,
+                            channelBuffer);
+
+        for (size_t c = 0; c < modelChannels; ++c) {
+            channelBuffer[c] -= missing;
+        }
+
+        got += missing;
+    }	    
 
     for (size_t c = 0; c < m_targetChannelCount; ++c) {
 
-	size_t sourceChannel = (c % dtvm->getChannelCount());
+	size_t sourceChannel = (c % modelChannels);
 
 //	std::cerr << "mixing channel " << c << " from source channel " << sourceChannel << std::endl;
 
@@ -463,28 +500,10 @@
 	    }
 	}
 
-	if (prevChannel != sourceChannel) {
-	    if (startFrame >= fadeIn/2) {
-		got = dtvm->getData
-		    (sourceChannel,
-		     startFrame - fadeIn/2,
-                     frames + fadeOut/2 + fadeIn/2,
-		     channelBuffer);
-	    } else {
-		size_t missing = fadeIn/2 - startFrame;
-		got = dtvm->getData
-		    (sourceChannel,
-		     startFrame,
-                     frames + fadeOut/2,
-		     channelBuffer + missing);
-	    }	    
-	}
-	prevChannel = sourceChannel;
-
 	for (size_t i = 0; i < fadeIn/2; ++i) {
 	    float *back = buffer[c];
 	    back -= fadeIn/2;
-	    back[i] += (channelGain * channelBuffer[i] * i) / fadeIn;
+	    back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn;
 	}
 
 	for (size_t i = 0; i < frames + fadeOut/2; ++i) {
@@ -495,7 +514,9 @@
 	    if (i > frames - fadeOut/2) {
 		mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
 	    }
-	    buffer[c][i] += mult * channelBuffer[i];
+            float val = channelBuffer[sourceChannel][i];
+            if (i >= got) val = 0.f;
+	    buffer[c][i] += mult * val;
 	}
     }
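
The rewritten mixing code above fetches all model channels with a single getData() call and then folds model channel (c % modelChannels) into each target channel with linear fades. The per-sample gain arithmetic can be summarised in isolation (a sketch in the style of the loops above, not the AudioGenerator method itself):

    // Gain applied to sample i of a block of 'frames' frames, with a
    // linear fade-in over fadeIn frames and fade-out over fadeOut frames.
    static inline float fadeGain(float channelGain, size_t i,
                                 size_t frames, size_t fadeIn, size_t fadeOut)
    {
        float mult = channelGain;
        if (fadeIn > 0 && i < fadeIn / 2) {
            mult = (mult * i) / fadeIn;                             // ramping up
        }
        if (fadeOut > 0 && i > frames - fadeOut / 2) {
            mult = (mult * ((frames + fadeOut / 2) - i)) / fadeOut; // ramping down
        }
        return mult;
    }
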
 
--- a/audioio/AudioJACKTarget.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioJACKTarget.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -184,6 +184,7 @@
 dynamic1(int, jack_activate, jack_client_t *, 1);
 dynamic1(int, jack_deactivate, jack_client_t *, 1);
 dynamic1(int, jack_client_close, jack_client_t *, 1);
+dynamic1(jack_nframes_t, jack_frame_time, jack_client_t *, 0);
 dynamic1(jack_nframes_t, jack_port_get_latency, jack_port_t *, 0);
 dynamic1(const char *, jack_port_name, const jack_port_t *, 0);
 
@@ -196,6 +197,7 @@
 #define jack_activate dynamic_jack_activate
 #define jack_deactivate dynamic_jack_deactivate
 #define jack_client_close dynamic_jack_client_close
+#define jack_frame_time dynamic_jack_frame_time
 #define jack_get_ports dynamic_jack_get_ports
 #define jack_port_register dynamic_jack_port_register
 #define jack_port_unregister dynamic_jack_port_unregister
@@ -243,12 +245,24 @@
     if (m_source) {
 	sourceModelReplaced();
     }
+    
+    // Mainstream JACK (though not jackdmp) calls mlockall() to lock
+    // down all memory for real-time operation.  That isn't a terribly
+    // good idea in an application like this that may have very high
+    // dynamic memory usage in other threads, as mlockall() applies
+    // across all threads.  We're far better off undoing it here and
+    // accepting the possible loss of true RT capability.
+    MUNLOCKALL();
 }
 
 AudioJACKTarget::~AudioJACKTarget()
 {
     std::cerr << "AudioJACKTarget::~AudioJACKTarget()" << std::endl;
 
+    if (m_source) {
+        m_source->setTarget(0, m_bufferSize);
+    }
+
     shutdown();
 
     if (m_client) {
@@ -285,6 +299,16 @@
     return (m_client != 0);
 }
 
+double
+AudioJACKTarget::getCurrentTime() const
+{
+    if (m_client && m_sampleRate) {
+        return double(jack_frame_time(m_client)) / double(m_sampleRate);
+    } else {
+        return 0.0;
+    }
+}
+
 int
 AudioJACKTarget::processStatic(jack_nframes_t nframes, void *arg)
 {
@@ -302,7 +326,7 @@
 {
     m_mutex.lock();
 
-    m_source->setTargetBlockSize(m_bufferSize);
+    m_source->setTarget(this, m_bufferSize);
     m_source->setTargetSampleRate(m_sampleRate);
 
     size_t channels = m_source->getSourceChannelCount();
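
The MUNLOCKALL() above is presumably a wrapper around the POSIX munlockall(3) call; a sketch of the idea under that assumption:

    #include <sys/mman.h>

    // JACK may have locked all of our address space with mlockall();
    // undo that so the application's large, dynamic allocations are not
    // pinned in RAM, accepting the loss of strict real-time guarantees.
    static void undoJackMemoryLock()
    {
        munlockall();
    }
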
--- a/audioio/AudioJACKTarget.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioJACKTarget.h	Wed Feb 27 10:32:45 2008 +0000
@@ -39,6 +39,8 @@
 
     virtual bool isOK() const;
 
+    virtual double getCurrentTime() const;
+
 public slots:
     virtual void sourceModelReplaced();
 
--- a/audioio/AudioPortAudioTarget.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioPortAudioTarget.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -48,7 +48,7 @@
 	return;
     }
 
-    m_bufferSize = 1024;
+    m_bufferSize = 2048;
     m_sampleRate = 44100;
     if (m_source && (m_source->getSourceSampleRate() != 0)) {
 	m_sampleRate = m_source->getSourceSampleRate();
@@ -63,11 +63,29 @@
 			       m_sampleRate, m_bufferSize, 0,
 			       processStatic, this);
 #else
-    err = Pa_OpenDefaultStream(&m_stream, 0, 2, paFloat32,
-			       m_sampleRate, m_bufferSize,
-			       processStatic, this);
+    PaStreamParameters op;
+    op.device = Pa_GetDefaultOutputDevice();
+    op.channelCount = 2;
+    op.sampleFormat = paFloat32;
+    op.suggestedLatency = 0.2;
+    op.hostApiSpecificStreamInfo = 0;
+    err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate,
+                        paFramesPerBufferUnspecified,
+                        paNoFlag, processStatic, this);
 #endif    
 
+#ifndef HAVE_PORTAUDIO_V18
+    if (err != paNoError) {
+
+        std::cerr << "WARNING: AudioPortAudioTarget: Failed to open PortAudio stream with default frames per buffer, trying again with fixed frames per buffer..." << std::endl;
+        
+        err = Pa_OpenStream(&m_stream, 0, &op, m_sampleRate,
+                            1024,
+                            paNoFlag, processStatic, this);
+	m_bufferSize = 1024;
+    }
+#endif
+
     if (err != paNoError) {
 	std::cerr << "ERROR: AudioPortAudioTarget: Failed to open PortAudio stream: " << Pa_GetErrorText(err) << std::endl;
 	m_stream = 0;
@@ -78,6 +96,7 @@
 #ifndef HAVE_PORTAUDIO_V18
     const PaStreamInfo *info = Pa_GetStreamInfo(m_stream);
     m_latency = int(info->outputLatency * m_sampleRate + 0.001);
+    if (m_bufferSize < m_latency) m_bufferSize = m_latency;
 #endif
 
     std::cerr << "PortAudio latency = " << m_latency << " frames" << std::endl;
@@ -94,7 +113,7 @@
 
     if (m_source) {
 	std::cerr << "AudioPortAudioTarget: block size " << m_bufferSize << std::endl;
-	m_source->setTargetBlockSize(m_bufferSize);
+	m_source->setTarget(this, m_bufferSize);
 	m_source->setTargetSampleRate(m_sampleRate);
 	m_source->setTargetPlayLatency(m_latency);
     }
@@ -108,6 +127,10 @@
 {
     std::cerr << "AudioPortAudioTarget::~AudioPortAudioTarget()" << std::endl;
 
+    if (m_source) {
+        m_source->setTarget(0, m_bufferSize);
+    }
+
     shutdown();
 
     if (m_stream) {
@@ -145,6 +168,13 @@
     return (m_stream != 0);
 }
 
+double
+AudioPortAudioTarget::getCurrentTime() const
+{
+    if (!m_stream) return 0.0;
+    else return Pa_GetStreamTime(m_stream);
+}
+
 #ifdef HAVE_PORTAUDIO_V18
 int
 AudioPortAudioTarget::processStatic(void *input, void *output,
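
The V19 path above opens the stream with explicit output parameters, preferring a driver-chosen block size and falling back to a fixed one. A consolidated sketch of that pattern (error-text reporting omitted):

    #include <portaudio.h>

    PaError openStereoOutput(PaStream **stream, double sampleRate,
                             PaStreamCallback *callback, void *userData)
    {
        PaStreamParameters op;
        op.device = Pa_GetDefaultOutputDevice();
        op.channelCount = 2;
        op.sampleFormat = paFloat32;
        op.suggestedLatency = 0.2;            // seconds
        op.hostApiSpecificStreamInfo = 0;

        // Let the host API pick its own block size first...
        PaError err = Pa_OpenStream(stream, 0, &op, sampleRate,
                                    paFramesPerBufferUnspecified,
                                    paNoFlag, callback, userData);
        if (err != paNoError) {
            // ...and fall back to a fixed 1024-frame block if that fails.
            err = Pa_OpenStream(stream, 0, &op, sampleRate, 1024,
                                paNoFlag, callback, userData);
        }
        return err;
    }
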
--- a/audioio/AudioPortAudioTarget.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/AudioPortAudioTarget.h	Wed Feb 27 10:32:45 2008 +0000
@@ -41,6 +41,8 @@
 
     virtual bool isOK() const;
 
+    virtual double getCurrentTime() const;
+
 public slots:
     virtual void sourceModelReplaced();
 
--- a/audioio/PhaseVocoderTimeStretcher.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,629 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam and QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef HAVE_RUBBERBAND
-
-#include "PhaseVocoderTimeStretcher.h"
-
-#include <iostream>
-#include <cassert>
-
-#include <QMutexLocker>
-
-//#define DEBUG_PHASE_VOCODER_TIME_STRETCHER 1
-
-PhaseVocoderTimeStretcher::PhaseVocoderTimeStretcher(size_t sampleRate,
-                                                     size_t channels,
-                                                     float ratio,
-                                                     bool sharpen,
-                                                     size_t maxOutputBlockSize) :
-    m_sampleRate(sampleRate),
-    m_channels(channels),
-    m_maxOutputBlockSize(maxOutputBlockSize),
-    m_ratio(ratio),
-    m_sharpen(sharpen),
-    m_totalCount(0),
-    m_transientCount(0),
-    m_n2sum(0),
-    m_mutex(new QMutex())
-{
-    initialise();
-}
-
-PhaseVocoderTimeStretcher::~PhaseVocoderTimeStretcher()
-{
-    std::cerr << "PhaseVocoderTimeStretcher::~PhaseVocoderTimeStretcher" << std::endl;
-
-    cleanup();
-    
-    delete m_mutex;
-}
-
-void
-PhaseVocoderTimeStretcher::initialise()
-{
-    std::cerr << "PhaseVocoderTimeStretcher::initialise" << std::endl;
-
-    calculateParameters();
-        
-    m_analysisWindow = new Window<float>(HanningWindow, m_wlen);
-    m_synthesisWindow = new Window<float>(HanningWindow, m_wlen);
-
-    m_prevPhase = new float *[m_channels];
-    m_prevAdjustedPhase = new float *[m_channels];
-
-    m_prevTransientMag = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));
-    m_prevTransientScore = 0;
-    m_prevTransient = false;
-
-    m_tempbuf = (float *)fftf_malloc(sizeof(float) * m_wlen);
-
-    m_time = new float *[m_channels];
-    m_freq = new fftf_complex *[m_channels];
-    m_plan = new fftf_plan[m_channels];
-    m_iplan = new fftf_plan[m_channels];
-
-    m_inbuf = new RingBuffer<float> *[m_channels];
-    m_outbuf = new RingBuffer<float> *[m_channels];
-    m_mashbuf = new float *[m_channels];
-
-    m_modulationbuf = (float *)fftf_malloc(sizeof(float) * m_wlen);
-        
-    for (size_t c = 0; c < m_channels; ++c) {
-
-        m_prevPhase[c] = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));
-        m_prevAdjustedPhase[c] = (float *)fftf_malloc(sizeof(float) * (m_wlen / 2 + 1));
-
-        m_time[c] = (float *)fftf_malloc(sizeof(float) * m_wlen);
-        m_freq[c] = (fftf_complex *)fftf_malloc(sizeof(fftf_complex) *
-                                                  (m_wlen / 2 + 1));
-        
-        m_plan[c] = fftf_plan_dft_r2c_1d(m_wlen, m_time[c], m_freq[c], FFTW_MEASURE);
-        m_iplan[c] = fftf_plan_dft_c2r_1d(m_wlen, m_freq[c], m_time[c], FFTW_MEASURE);
-
-        m_outbuf[c] = new RingBuffer<float>
-            ((m_maxOutputBlockSize + m_wlen) * 2);
-        m_inbuf[c] = new RingBuffer<float>
-            (lrintf(m_outbuf[c]->getSize() / m_ratio) + m_wlen);
-
-        std::cerr << "making inbuf size " << m_inbuf[c]->getSize() << " (outbuf size is " << m_outbuf[c]->getSize() << ", ratio " << m_ratio << ")" << std::endl;
-
-           
-        m_mashbuf[c] = (float *)fftf_malloc(sizeof(float) * m_wlen);
-        
-        for (size_t i = 0; i < m_wlen; ++i) {
-            m_mashbuf[c][i] = 0.0;
-        }
-
-        for (size_t i = 0; i <= m_wlen/2; ++i) {
-            m_prevPhase[c][i] = 0.0;
-            m_prevAdjustedPhase[c][i] = 0.0;
-        }
-    }
-
-    for (size_t i = 0; i < m_wlen; ++i) {
-        m_modulationbuf[i] = 0.0;
-    }
-
-    for (size_t i = 0; i <= m_wlen/2; ++i) {
-        m_prevTransientMag[i] = 0.0;
-    }
-}
-
-void
-PhaseVocoderTimeStretcher::calculateParameters()
-{
-    std::cerr << "PhaseVocoderTimeStretcher::calculateParameters" << std::endl;
-
-    m_wlen = 1024;
-
-    //!!! In transient sharpening mode, we need to pick the window
-    //length so as to be more or less fixed in audio duration (i.e. we
-    //need to exploit the sample rate)
-
-    //!!! have to work out the relationship between wlen and transient
-    //threshold
-
-    if (m_ratio < 1) {
-        if (m_ratio < 0.4) {
-            m_n1 = 1024;
-            m_wlen = 2048;
-        } else if (m_ratio < 0.8) {
-            m_n1 = 512;
-        } else {
-            m_n1 = 256;
-        }
-        if (shouldSharpen()) {
-            m_wlen = 2048;
-        }
-        m_n2 = lrintf(m_n1 * m_ratio);
-    } else {
-        if (m_ratio > 2) {
-            m_n2 = 512;
-            m_wlen = 4096; 
-        } else if (m_ratio > 1.6) {
-            m_n2 = 384;
-            m_wlen = 2048;
-        } else {
-            m_n2 = 256;
-        }
-        if (shouldSharpen()) {
-            if (m_wlen < 2048) m_wlen = 2048;
-        }
-        m_n1 = lrintf(m_n2 / m_ratio);
-        if (m_n1 == 0) {
-            m_n1 = 1;
-            m_n2 = lrintf(m_ratio);
-        }
-    }
-
-    m_transientThreshold = lrintf(m_wlen / 4.5);
-
-    m_totalCount = 0;
-    m_transientCount = 0;
-    m_n2sum = 0;
-
-
-    std::cerr << "PhaseVocoderTimeStretcher: channels = " << m_channels
-              << ", ratio = " << m_ratio
-              << ", n1 = " << m_n1 << ", n2 = " << m_n2 << ", wlen = "
-              << m_wlen << ", max = " << m_maxOutputBlockSize << std::endl;
-//              << ", outbuflen = " << m_outbuf[0]->getSize() << std::endl;
-}
-
-void
-PhaseVocoderTimeStretcher::cleanup()
-{
-    std::cerr << "PhaseVocoderTimeStretcher::cleanup" << std::endl;
-
-    for (size_t c = 0; c < m_channels; ++c) {
-
-        fftf_destroy_plan(m_plan[c]);
-        fftf_destroy_plan(m_iplan[c]);
-
-        fftf_free(m_time[c]);
-        fftf_free(m_freq[c]);
-
-        fftf_free(m_mashbuf[c]);
-        fftf_free(m_prevPhase[c]);
-        fftf_free(m_prevAdjustedPhase[c]);
-
-        delete m_inbuf[c];
-        delete m_outbuf[c];
-    }
-
-    fftf_free(m_tempbuf);
-    fftf_free(m_modulationbuf);
-    fftf_free(m_prevTransientMag);
-
-    delete[] m_prevPhase;
-    delete[] m_prevAdjustedPhase;
-    delete[] m_inbuf;
-    delete[] m_outbuf;
-    delete[] m_mashbuf;
-    delete[] m_time;
-    delete[] m_freq;
-    delete[] m_plan;
-    delete[] m_iplan;
-
-    delete m_analysisWindow;
-    delete m_synthesisWindow;
-}	
-
-void
-PhaseVocoderTimeStretcher::setRatio(float ratio)
-{
-    QMutexLocker locker(m_mutex);
-
-    size_t formerWlen = m_wlen;
-    m_ratio = ratio;
-
-    std::cerr << "PhaseVocoderTimeStretcher::setRatio: new ratio " << ratio
-              << std::endl;
-
-    calculateParameters();
-
-    if (m_wlen == formerWlen) {
-
-        // This is the only container whose size depends on m_ratio
-
-        RingBuffer<float> **newin = new RingBuffer<float> *[m_channels];
-
-        size_t formerSize = m_inbuf[0]->getSize();
-        size_t newSize = lrintf(m_outbuf[0]->getSize() / m_ratio) + m_wlen;
-
-        std::cerr << "resizing inbuf from " << formerSize << " to "
-                  << newSize << " (outbuf size is " << m_outbuf[0]->getSize() << ", ratio " << m_ratio << ")" << std::endl;
-
-        if (formerSize != newSize) {
-
-            size_t ready = m_inbuf[0]->getReadSpace();
-
-            for (size_t c = 0; c < m_channels; ++c) {
-                newin[c] = new RingBuffer<float>(newSize);
-            }
-
-            if (ready > 0) {
-
-                size_t copy = std::min(ready, newSize);
-                float *tmp = new float[ready];
-
-                for (size_t c = 0; c < m_channels; ++c) {
-                    m_inbuf[c]->read(tmp, ready);
-                    newin[c]->write(tmp + ready - copy, copy);
-                }
-                
-                delete[] tmp;
-            }
-            
-            for (size_t c = 0; c < m_channels; ++c) {
-                delete m_inbuf[c];
-            }
-            
-            delete[] m_inbuf;
-            m_inbuf = newin;
-        }
-
-    } else {
-        
-        std::cerr << "wlen changed" << std::endl;
-        cleanup();
-        initialise();
-    }
-}
-
-size_t
-PhaseVocoderTimeStretcher::getProcessingLatency() const
-{
-    return getWindowSize() - getInputIncrement();
-}
-
-size_t
-PhaseVocoderTimeStretcher::getRequiredInputSamples() const
-{
-    QMutexLocker locker(m_mutex);
-
-    if (m_inbuf[0]->getReadSpace() >= m_wlen) return 0;
-    return m_wlen - m_inbuf[0]->getReadSpace();
-}
-
-void
-PhaseVocoderTimeStretcher::putInput(float **input, size_t samples)
-{
-    QMutexLocker locker(m_mutex);
-
-    // We need to add samples from input to our internal buffer.  When
-    // we have m_windowSize samples in the buffer, we can process it,
-    // move the samples back by m_n1 and write the output onto our
-    // internal output buffer.  If we have (samples * ratio) samples
-    // in that, we can write m_n2 of them back to output and return
-    // (otherwise we have to write zeroes).
-
-    // When we process, we write m_wlen to our fixed output buffer
-    // (m_mashbuf).  We then pull out the first m_n2 samples from that
-    // buffer, push them into the output ring buffer, and shift
-    // m_mashbuf left by that amount.
-
-    // The processing latency is then m_wlen - m_n2.
-
-    size_t consumed = 0;
-
-    while (consumed < samples) {
-
-	size_t writable = m_inbuf[0]->getWriteSpace();
-	writable = std::min(writable, samples - consumed);
-
-	if (writable == 0) {
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-	    std::cerr << "WARNING: PhaseVocoderTimeStretcher::putInput: writable == 0 (inbuf has " << m_inbuf[0]->getReadSpace() << " samples available for reading, space for " << m_inbuf[0]->getWriteSpace() << " more)" << std::endl;
-#endif
-            if (m_inbuf[0]->getReadSpace() < m_wlen ||
-                m_outbuf[0]->getWriteSpace() < m_n2) {
-                std::cerr << "WARNING: PhaseVocoderTimeStretcher::putInput: Inbuf has " << m_inbuf[0]->getReadSpace() << ", outbuf has space for " << m_outbuf[0]->getWriteSpace() << " (n2 = " << m_n2 << ", wlen = " << m_wlen << "), won't be able to process" << std::endl;
-                break;
-            }
-	} else {
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-            std::cerr << "writing " << writable << " from index " << consumed << " to inbuf, consumed will be " << consumed + writable << std::endl;
-#endif
-
-            for (size_t c = 0; c < m_channels; ++c) {
-                m_inbuf[c]->write(input[c] + consumed, writable);
-            }
-            consumed += writable;
-        }
-
-	while (m_inbuf[0]->getReadSpace() >= m_wlen &&
-	       m_outbuf[0]->getWriteSpace() >= m_n2) {
-
-	    // We know we have at least m_wlen samples available
-	    // in m_inbuf.  We need to peek m_wlen of them for
-	    // processing, and then read m_n1 to advance the read
-	    // pointer.
-            
-            for (size_t c = 0; c < m_channels; ++c) {
-
-                size_t got = m_inbuf[c]->peek(m_tempbuf, m_wlen);
-                assert(got == m_wlen);
-
-                analyseBlock(c, m_tempbuf);
-            }
-
-            bool transient = false;
-            if (shouldSharpen()) transient = isTransient();
-
-            size_t n2 = m_n2;
-
-            if (transient) {
-                n2 = m_n1;
-            }
-
-            ++m_totalCount;
-            if (transient) ++m_transientCount;
-            m_n2sum += n2;
-
-//            std::cerr << "ratio for last 10: " <<last10num << "/" << (10 * m_n1) << " = " << float(last10num) / float(10 * m_n1) << " (should be " << m_ratio << ")" << std::endl;
-            
-            if (m_totalCount > 50 && m_transientCount < m_totalCount) {
-
-                int fixed = lrintf(m_transientCount * m_n1);
-
-                int idealTotal = lrintf(m_totalCount * m_n1 * m_ratio);
-                int idealSquashy = idealTotal - fixed;
-
-                int squashyCount = m_totalCount - m_transientCount;
-                
-                n2 = lrintf(idealSquashy / squashyCount);
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-                if (n2 != m_n2) {
-                    std::cerr << m_n2 << " -> " << n2 << std::endl;
-                }
-#endif
-            }
-
-            for (size_t c = 0; c < m_channels; ++c) {
-
-                synthesiseBlock(c, m_mashbuf[c],
-                                c == 0 ? m_modulationbuf : 0,
-                                m_prevTransient ? m_n1 : m_n2);
-
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-                std::cerr << "writing first " << m_n2 << " from mashbuf, skipping " << m_n1 << " on inbuf " << std::endl;
-#endif
-                m_inbuf[c]->skip(m_n1);
-
-                for (size_t i = 0; i < n2; ++i) {
-                    if (m_modulationbuf[i] > 0.f) {
-                        m_mashbuf[c][i] /= m_modulationbuf[i];
-                    }
-                }
-
-                m_outbuf[c]->write(m_mashbuf[c], n2);
-
-                for (size_t i = 0; i < m_wlen - n2; ++i) {
-                    m_mashbuf[c][i] = m_mashbuf[c][i + n2];
-                }
-
-                for (size_t i = m_wlen - n2; i < m_wlen; ++i) {
-                    m_mashbuf[c][i] = 0.0f;
-                }
-            }
-
-            m_prevTransient = transient;
-
-            for (size_t i = 0; i < m_wlen - n2; ++i) {
-                m_modulationbuf[i] = m_modulationbuf[i + n2];
-	    }
-
-	    for (size_t i = m_wlen - n2; i < m_wlen; ++i) {
-                m_modulationbuf[i] = 0.0f;
-	    }
-
-            if (!transient) m_n2 = n2;
-	}
-
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-	std::cerr << "loop ended: inbuf read space " << m_inbuf[0]->getReadSpace() << ", outbuf write space " << m_outbuf[0]->getWriteSpace() << std::endl;
-#endif
-    }
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-    std::cerr << "PhaseVocoderTimeStretcher::putInput returning" << std::endl;
-#endif
-
-//    std::cerr << "ratio: nominal: " << getRatio() << " actual: "
-//              << m_total2 << "/" << m_total1 << " = " << float(m_total2) / float(m_total1) << " ideal: " << m_ratio << std::endl;
-}
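
The hop-size adaptation in the loop above does its intermediate arithmetic on ints before rounding with lrintf, so the final division is an integer division. Its intent, restated in plain floating point (a sketch only, using the same member names; the surrounding guard already ensures squashyCount is non-zero):

    // Non-transient ("squashy") frames must absorb the whole stretch, because
    // transient frames are emitted with the unstretched hop m_n1.
    float fixedOutput  = float(m_transientCount) * float(m_n1);        // output already pinned to n1
    float idealTotal   = float(m_totalCount) * float(m_n1) * m_ratio;  // output we should have produced so far
    float idealSquashy = idealTotal - fixedOutput;                     // share left for the squashy frames
    int   squashyCount = m_totalCount - m_transientCount;              // > 0, per the guard above
    size_t n2 = size_t(lrintf(idealSquashy / float(squashyCount)));    // average output hop for those frames
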
-
-size_t
-PhaseVocoderTimeStretcher::getAvailableOutputSamples() const
-{
-    QMutexLocker locker(m_mutex);
-
-    return m_outbuf[0]->getReadSpace();
-}
-
-void
-PhaseVocoderTimeStretcher::getOutput(float **output, size_t samples)
-{
-    QMutexLocker locker(m_mutex);
-
-    if (m_outbuf[0]->getReadSpace() < samples) {
-	std::cerr << "WARNING: PhaseVocoderTimeStretcher::getOutput: not enough data (yet?) (" << m_outbuf[0]->getReadSpace() << " < " << samples << ")" << std::endl;
-	size_t fill = samples - m_outbuf[0]->getReadSpace();
-        for (size_t c = 0; c < m_channels; ++c) {
-            for (size_t i = 0; i < fill; ++i) {
-                output[c][i] = 0.0;
-            }
-            m_outbuf[c]->read(output[c] + fill, m_outbuf[c]->getReadSpace());
-        }
-    } else {
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-	std::cerr << "enough data - writing " << samples << " from outbuf" << std::endl;
-#endif
-        for (size_t c = 0; c < m_channels; ++c) {
-            m_outbuf[c]->read(output[c], samples);
-        }
-    }
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-    std::cerr << "PhaseVocoderTimeStretcher::getOutput returning" << std::endl;
-#endif
-}
-
-void
-PhaseVocoderTimeStretcher::analyseBlock(size_t c, float *buf)
-{
-    size_t i;
-
-    // buf contains m_wlen samples
-
-#ifdef DEBUG_PHASE_VOCODER_TIME_STRETCHER
-    std::cerr << "PhaseVocoderTimeStretcher::analyseBlock (channel " << c << ")" << std::endl;
-#endif
-
-    m_analysisWindow->cut(buf);
-
-    for (i = 0; i < m_wlen/2; ++i) {
-	float temp = buf[i];
-	buf[i] = buf[i + m_wlen/2];
-	buf[i + m_wlen/2] = temp;
-    }
-
-    for (i = 0; i < m_wlen; ++i) {
-	m_time[c][i] = buf[i];
-    }
-
-    fftf_execute(m_plan[c]); // m_time -> m_freq
-}
-
-bool
-PhaseVocoderTimeStretcher::isTransient()
-{
-    int count = 0;
-
-    for (size_t i = 0; i <= m_wlen/2; ++i) {
-
-        float real = 0.f, imag = 0.f;
-
-        for (size_t c = 0; c < m_channels; ++c) {
-            real += m_freq[c][i][0];
-            imag += m_freq[c][i][1];
-        }
-
-        float sqrmag = (real * real + imag * imag);
-
-        if (m_prevTransientMag[i] > 0.f) {
-            float diff = 10.f * log10f(sqrmag / m_prevTransientMag[i]);
-            if (diff > 3.f) ++count;
-        }
-
-        m_prevTransientMag[i] = sqrmag;
-    }
-
-    bool isTransient = false;
-
-//    if (count > m_transientThreshold &&
-//        count > m_prevTransientScore * 1.2) {
-    if (count > m_prevTransientScore &&
-        count > m_transientThreshold &&
-        count - m_prevTransientScore > int(m_wlen) / 20) {
-        isTransient = true;
-
-
-//        std::cerr << "isTransient (count = " << count << ", prev = " << m_prevTransientScore << ", diff = " << count - m_prevTransientScore << ", ratio = " << (m_totalCount > 0 ? (float (m_n2sum) / float(m_totalCount * m_n1)) : 1.f) << ", ideal = " << m_ratio << ")" << std::endl;
-//    } else {
-//        std::cerr << " !transient (count = " << count << ", prev = " << m_prevTransientScore << ", diff = " << count - m_prevTransientScore << ")" << std::endl;
-    }
-
-    m_prevTransientScore = count;
-
-    return isTransient;
-}
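
In other words, the detector above counts spectral bins whose channel-summed power has risen by more than 3 dB since the previous analysis frame, and only flags a transient when that count beats both the configured threshold and the previous frame's count by a margin of m_wlen / 20. The per-bin test, restated (a sketch; requires <cmath>):

    // A bin contributes to the transient score if its power has risen by
    // more than 3 dB relative to the previous analysis frame.
    inline bool binHasRisen(float sqrmag, float prevSqrmag)
    {
        if (prevSqrmag <= 0.f) return false;
        return 10.f * log10f(sqrmag / prevSqrmag) > 3.f;
    }
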
-
-void
-PhaseVocoderTimeStretcher::synthesiseBlock(size_t c,
-                                           float *out,
-                                           float *modulation,
-                                           size_t lastStep)
-{
-    bool unchanged = (lastStep == m_n1);
-
-    for (size_t i = 0; i <= m_wlen/2; ++i) {
-		
-        float phase = princargf(atan2f(m_freq[c][i][1], m_freq[c][i][0]));
-        float adjustedPhase = phase;
-
-        if (!unchanged) {
-
-            float omega = (2 * M_PI * m_n1 * i) / m_wlen;
-	
-            float expectedPhase = m_prevPhase[c][i] + omega;
-
-            float phaseError = princargf(phase - expectedPhase);
-
-            float phaseIncrement = (omega + phaseError) / m_n1;
-            
-            adjustedPhase = m_prevAdjustedPhase[c][i] +
-                lastStep * phaseIncrement;
-            
-            float mag = sqrtf(m_freq[c][i][0] * m_freq[c][i][0] +
-                              m_freq[c][i][1] * m_freq[c][i][1]);
-            
-            float real = mag * cosf(adjustedPhase);
-            float imag = mag * sinf(adjustedPhase);
-            m_freq[c][i][0] = real;
-            m_freq[c][i][1] = imag;
-        }
-
-        m_prevPhase[c][i] = phase;
-        m_prevAdjustedPhase[c][i] = adjustedPhase;
-    }
-
-    fftf_execute(m_iplan[c]); // m_freq -> m_time, inverse fft
-
-    for (size_t i = 0; i < m_wlen/2; ++i) {
-        float temp = m_time[c][i];
-        m_time[c][i] = m_time[c][i + m_wlen/2];
-        m_time[c][i + m_wlen/2] = temp;
-    }
-    
-    for (size_t i = 0; i < m_wlen; ++i) {
-        m_time[c][i] = m_time[c][i] / m_wlen;
-    }
-
-    m_synthesisWindow->cut(m_time[c]);
-
-    for (size_t i = 0; i < m_wlen; ++i) {
-        out[i] += m_time[c][i];
-    }
-
-    if (modulation) {
-
-        float area = m_analysisWindow->getArea();
-
-        for (size_t i = 0; i < m_wlen; ++i) {
-            float val = m_synthesisWindow->getValue(i);
-            modulation[i] += val * area;
-        }
-    }
-}
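
For reference, the per-bin loop above is the standard phase-vocoder unwrapping step: estimate each bin's true frequency from the deviation between the measured phase and the phase expected after an analysis hop of m_n1 samples, then advance the output phase at that rate over the (generally different) synthesis hop lastStep. Schematically, for bin i of channel c (a restatement only, names as in the code above):

    float omega         = (2.f * M_PI * m_n1 * i) / m_wlen;     // expected phase advance of bin i over m_n1 samples
    float expectedPhase = m_prevPhase[c][i] + omega;
    float phaseError    = princargf(phase - expectedPhase);     // deviation, wrapped into (-pi, pi]
    float increment     = (omega + phaseError) / m_n1;          // estimated true phase increment per sample
    float adjustedPhase = m_prevAdjustedPhase[c][i] + lastStep * increment;
    // The bin is then resynthesised with its original magnitude at adjustedPhase.
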
-
-
-#endif
--- a/audioio/PhaseVocoderTimeStretcher.h	Fri Nov 30 17:36:14 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,191 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam and QMUL.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _PHASE_VOCODER_TIME_STRETCHER_H_
-#define _PHASE_VOCODER_TIME_STRETCHER_H_
-
-#ifndef HAVE_RUBBERBAND
-
-#include "base/Window.h"
-#include "base/RingBuffer.h"
-
-#include "data/fft/FFTapi.h"
-
-#include <QMutex>
-
-/**
- * A time stretcher that alters the performance speed of audio,
- * preserving pitch.
- *
- * This is based on the straightforward phase vocoder with phase
- * unwrapping (as in e.g. the DAFX book pp275-), with optional
- * percussive transient detection to avoid smearing percussive notes
- * and resynchronise phases, and adding a stream API for real-time
- * use.  Principles and methods from Chris Duxbury, AES 2002 and 2004
- * thesis; Emmanuel Ravelli, DAFX 2005; Dan Barry, ISSC 2005 on
- * percussion detection; code by Chris Cannam.
- */
-
-class PhaseVocoderTimeStretcher
-{
-public:
-    PhaseVocoderTimeStretcher(size_t sampleRate,
-                              size_t channels,
-                              float ratio,
-                              bool sharpen,
-                              size_t maxOutputBlockSize);
-    virtual ~PhaseVocoderTimeStretcher();
-
-    /**
-     * Return the number of samples that would need to be added via
-     * putInput in order to provoke the time stretcher into doing some
-     * time stretching and making more output samples available.
-     * This will be an estimate if transient sharpening is on; the
-     * caller may need to do the put/get/test cycle more than once.
-     */
-    size_t getRequiredInputSamples() const;
-
-    /**
-     * Put (and possibly process) a given number of input samples.
-     * Number should usually equal the value returned from
-     * getRequiredInputSamples().
-     */
-    void putInput(float **input, size_t samples);
-
-    /**
-     * Get the number of processed samples ready for reading.
-     */
-    size_t getAvailableOutputSamples() const;
-
-    /**
-     * Get some processed samples.
-     */
-    void getOutput(float **output, size_t samples);
-
-    //!!! and reset?
-
-    /**
-     * Change the time stretch ratio.
-     */
-    void setRatio(float ratio);
-
-    /**
-     * Get the hop size for input.
-     */
-    size_t getInputIncrement() const { return m_n1; }
-
-    /**
-     * Get the hop size for output.
-     */
-    size_t getOutputIncrement() const { return m_n2; }
-
-    /**
-     * Get the window size for FFT processing.
-     */
-    size_t getWindowSize() const { return m_wlen; }
-
-    /**
-     * Get the stretch ratio.
-     */
-    float getRatio() const { return float(m_n2) / float(m_n1); }
-
-    /**
-     * Return whether this time stretcher will attempt to sharpen transients.
-     */
-    bool getSharpening() const { return m_sharpen; }
-
-    /**
-     * Return the number of channels for this time stretcher.
-     */
-    size_t getChannelCount() const { return m_channels; }
-
-    /**
-     * Get the latency added by the time stretcher, in sample frames.
-     * This will be exact if transient sharpening is off, or approximate
-     * if it is on.
-     */
-    size_t getProcessingLatency() const;
-
-protected:
-    /**
-     * Process a single phase vocoder frame from "in" into
-     * m_freq[channel].
-     */
-    void analyseBlock(size_t channel, float *in); // into m_freq[channel]
-
-    /**
-     * Examine m_freq[0..m_channels-1] and return whether a percussive
-     * transient is found.
-     */
-    bool isTransient(); 
-
-    /**
-     * Resynthesise from m_freq[channel] adding in to "out",
-     * adjusting phases on the basis of a prior step size of lastStep.
-     * Also add the window shape in to the modulation array (if
-     * present) -- for use in ensuring the output has the correct
-     * magnitude afterwards.
-     */
-    void synthesiseBlock(size_t channel, float *out, float *modulation,
-                         size_t lastStep);
-
-    void initialise();
-    void calculateParameters();
-    void cleanup();
-
-    bool shouldSharpen() {
-        return m_sharpen && (m_ratio > 0.25);
-    }
-
-    size_t m_sampleRate;
-    size_t m_channels;
-    size_t m_maxOutputBlockSize;
-    float m_ratio;
-    bool m_sharpen;
-    size_t m_n1;
-    size_t m_n2;
-    size_t m_wlen;
-    Window<float> *m_analysisWindow;
-    Window<float> *m_synthesisWindow;
-
-    int m_totalCount;
-    int m_transientCount;
-    int m_n2sum;
-
-    float **m_prevPhase;
-    float **m_prevAdjustedPhase;
-
-    float *m_prevTransientMag;
-    int  m_prevTransientScore;
-    int  m_transientThreshold;
-    bool m_prevTransient;
-
-    float *m_tempbuf;
-    float **m_time;
-    fftf_complex **m_freq;
-    fftf_plan *m_plan;
-    fftf_plan *m_iplan;
-    
-    RingBuffer<float> **m_inbuf;
-    RingBuffer<float> **m_outbuf;
-    float **m_mashbuf;
-    float *m_modulationbuf;
-
-    QMutex *m_mutex;
-};
-
-#endif
-
-#endif
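
The header above describes a put/get/test cycle: ask the stretcher how much input it wants, feed it, and drain whatever output becomes available. Driving it looks roughly like this (a sketch only; readBlock and the deinterleaved input/output buffers are hypothetical, and because getRequiredInputSamples() is only an estimate when sharpening is on, the put/get/test is done in a loop):

    PhaseVocoderTimeStretcher stretcher(sampleRate, channels, ratio,
                                        true /* sharpen */, blockSize);

    while (stretcher.getAvailableOutputSamples() < blockSize) {
        size_t needed = stretcher.getRequiredInputSamples();
        readBlock(input, needed);               // hypothetical: fill input[c][0..needed-1]
        stretcher.putInput(input, needed);
    }
    stretcher.getOutput(output, blockSize);
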
--- a/audioio/audioio.pro	Fri Nov 30 17:36:14 2007 +0000
+++ b/audioio/audioio.pro	Wed Feb 27 10:32:45 2008 +0000
@@ -19,7 +19,6 @@
            AudioJACKTarget.h \
            AudioPortAudioTarget.h \
            AudioTargetFactory.h \
-           PhaseVocoderTimeStretcher.h \
            PlaySpeedRangeMapper.h
 SOURCES += AudioCallbackPlaySource.cpp \
            AudioCallbackPlayTarget.cpp \
@@ -28,5 +27,4 @@
            AudioJACKTarget.cpp \
            AudioPortAudioTarget.cpp \
            AudioTargetFactory.cpp \
-           PhaseVocoderTimeStretcher.cpp \
            PlaySpeedRangeMapper.cpp
--- a/framework/Document.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/Document.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -29,6 +29,7 @@
 #include "plugin/transform/ModelTransformerFactory.h"
 #include <QApplication>
 #include <QTextStream>
+#include <QSettings>
 #include <iostream>
 
 // For alignment:
@@ -36,6 +37,8 @@
 #include "data/model/SparseTimeValueModel.h"
 #include "data/model/AlignmentModel.h"
 
+//#define DEBUG_DOCUMENT 1
+
 //!!! still need to handle command history, documentRestored/documentModified
 
 Document::Document() :
@@ -53,10 +56,14 @@
     //still refer to it in various places that don't have access to
     //the document, be nice to fix that
 
-//    std::cerr << "\n\nDocument::~Document: about to clear command history" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "\n\nDocument::~Document: about to clear command history" << std::endl;
+#endif
     CommandHistory::getInstance()->clear();
     
+#ifdef DEBUG_DOCUMENT
     std::cerr << "Document::~Document: about to delete layers" << std::endl;
+#endif
     while (!m_layers.empty()) {
 	deleteLayer(*m_layers.begin(), true);
     }
@@ -73,19 +80,21 @@
 		std::cerr << "Document::~Document: WARNING: Main model is also"
 			  << " in models list!" << std::endl;
 	    } else if (model) {
+                model->aboutToDelete();
 		emit modelAboutToBeDeleted(model);
-                model->aboutToDelete();
 		delete model;
 	    }
 	    m_models.erase(m_models.begin());
 	}
     }
 
-//    std::cerr << "Document::~Document: About to get rid of main model"
-//	      << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::~Document: About to get rid of main model"
+	      << std::endl;
+#endif
     if (m_mainModel) {
+        m_mainModel->aboutToDelete();
         emit modelAboutToBeDeleted(m_mainModel);
-        m_mainModel->aboutToDelete();
     }
 
     emit mainModelChanged(0);
@@ -103,8 +112,10 @@
 
     m_layers.insert(newLayer);
 
-//    std::cerr << "Document::createLayer: Added layer of type " << type
-//              << ", now have " << m_layers.size() << " layers" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::createLayer: Added layer of type " << type
+              << ", now have " << m_layers.size() << " layers" << std::endl;
+#endif
 
     emit layerAdded(newLayer);
 
@@ -147,8 +158,10 @@
 
     m_layers.insert(newLayer);
 
+#ifdef DEBUG_DOCUMENT
     std::cerr << "Document::createImportedLayer: Added layer of type " << type
               << ", now have " << m_layers.size() << " layers" << std::endl;
+#endif
 
     emit layerAdded(newLayer);
     return newLayer;
@@ -190,24 +203,23 @@
 }
 
 Layer *
-Document::createDerivedLayer(TransformId transform,
-                             Model *inputModel, 
-                             const PluginTransformer::ExecutionContext &context,
-                             QString configurationXml)
+Document::createDerivedLayer(const Transform &transform,
+                             const ModelTransformer::Input &input)
 {
-    Model *newModel = addDerivedModel(transform, inputModel,
-                                      context, configurationXml);
+    QString message;
+    Model *newModel = addDerivedModel(transform, input, message);
     if (!newModel) {
-        // error already printed to stderr by addDerivedModel
-        emit modelGenerationFailed(transform);
+        emit modelGenerationFailed(transform.getIdentifier(), message);
         return 0;
+    } else if (message != "") {
+        emit modelGenerationWarning(transform.getIdentifier(), message);
     }
 
     LayerFactory::LayerTypeSet types =
 	LayerFactory::getInstance()->getValidLayerTypes(newModel);
 
     if (types.empty()) {
-	std::cerr << "WARNING: Document::createLayerForTransformer: no valid display layer for output of transform " << transform.toStdString() << std::endl;
+	std::cerr << "WARNING: Document::createLayerForTransformer: no valid display layer for output of transform " << transform.getIdentifier().toStdString() << std::endl;
 	delete newModel;
 	return 0;
     }
@@ -235,7 +247,8 @@
     if (newLayer) {
 	newLayer->setObjectName(getUniqueLayerName
                                 (TransformFactory::getInstance()->
-                                 getTransformFriendlyName(transform)));
+                                 getTransformFriendlyName
+                                 (transform.getIdentifier())));
     }
 
     emit layerAdded(newLayer);
@@ -259,20 +272,26 @@
     // using one of these.  Carry out this replacement before we
     // delete any of the models.
 
-//    std::cerr << "Document::setMainModel: Have "
-//              << m_layers.size() << " layers" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::setMainModel: Have "
+              << m_layers.size() << " layers" << std::endl;
+#endif
 
     for (LayerSet::iterator i = m_layers.begin(); i != m_layers.end(); ++i) {
 
 	Layer *layer = *i;
 	Model *model = layer->getModel();
 
-//        std::cerr << "Document::setMainModel: inspecting model "
-//                  << (model ? model->objectName().toStdString() : "(null)") << " in layer "
-//                  << layer->objectName().toStdString() << std::endl;
+#ifdef DEBUG_DOCUMENT
+        std::cerr << "Document::setMainModel: inspecting model "
+                  << (model ? model->objectName().toStdString() : "(null)") << " in layer "
+                  << layer->objectName().toStdString() << std::endl;
+#endif
 
 	if (model == oldMainModel) {
-//            std::cerr << "... it uses the old main model, replacing" << std::endl;
+#ifdef DEBUG_DOCUMENT
+            std::cerr << "... it uses the old main model, replacing" << std::endl;
+#endif
 	    LayerFactory::getInstance()->setModel(layer, m_mainModel);
 	    continue;
 	}
@@ -288,43 +307,60 @@
 	if (m_models[model].source &&
             (m_models[model].source == oldMainModel)) {
 
-//            std::cerr << "... it uses a model derived from the old main model, regenerating" << std::endl;
+#ifdef DEBUG_DOCUMENT
+            std::cerr << "... it uses a model derived from the old main model, regenerating" << std::endl;
+#endif
 
 	    // This model was derived from the previous main
 	    // model: regenerate it.
 	    
-	    TransformId transform = m_models[model].transform;
-            PluginTransformer::ExecutionContext context = m_models[model].context;
+	    const Transform &transform = m_models[model].transform;
+            QString transformId = transform.getIdentifier();
 	    
+            //!!! We have a problem here if the number of channels in
+            //the main model has changed.
+
+            QString message;
 	    Model *replacementModel =
                 addDerivedModel(transform,
-                                m_mainModel,
-                                context,
-                                m_models[model].configurationXml);
+                                ModelTransformer::Input
+                                (m_mainModel, m_models[model].channel),
+                                message);
 	    
 	    if (!replacementModel) {
 		std::cerr << "WARNING: Document::setMainModel: Failed to regenerate model for transform \""
-			  << transform.toStdString() << "\"" << " in layer " << layer << std::endl;
-                if (failedTransformers.find(transform) == failedTransformers.end()) {
+			  << transformId.toStdString() << "\"" << " in layer " << layer << std::endl;
+                if (failedTransformers.find(transformId)
+                    == failedTransformers.end()) {
                     emit modelRegenerationFailed(layer->objectName(),
-                                                 transform);
-                    failedTransformers.insert(transform);
+                                                 transformId,
+                                                 message);
+                    failedTransformers.insert(transformId);
                 }
 		obsoleteLayers.push_back(layer);
 	    } else {
-//                std::cerr << "Replacing model " << model << " (type "
-//                          << typeid(*model).name() << ") with model "
-//                          << replacementModel << " (type "
-//                          << typeid(*replacementModel).name() << ") in layer "
-//                          << layer << " (name " << layer->objectName().toStdString() << ")"
-//                          << std::endl;
+                if (message != "") {
+                    emit modelRegenerationWarning(layer->objectName(),
+                                                  transformId,
+                                                  message);
+                }
+#ifdef DEBUG_DOCUMENT
+                std::cerr << "Replacing model " << model << " (type "
+                          << typeid(*model).name() << ") with model "
+                          << replacementModel << " (type "
+                          << typeid(*replacementModel).name() << ") in layer "
+                          << layer << " (name " << layer->objectName().toStdString() << ")"
+                          << std::endl;
+#endif
                 RangeSummarisableTimeValueModel *rm =
                     dynamic_cast<RangeSummarisableTimeValueModel *>(replacementModel);
+#ifdef DEBUG_DOCUMENT
                 if (rm) {
                     std::cerr << "new model has " << rm->getChannelCount() << " channels " << std::endl;
                 } else {
                     std::cerr << "new model is not a RangeSummarisableTimeValueModel!" << std::endl;
                 }
+#endif
 		setModel(layer, replacementModel);
 	    }
 	}	    
@@ -335,24 +371,36 @@
     }
 
     for (ModelMap::iterator i = m_models.begin(); i != m_models.end(); ++i) {
-        if (oldMainModel &&
-            (i->first->getAlignmentReference() == oldMainModel)) {
+
+        if (m_autoAlignment) {
+
+            alignModel(i->first);
+
+        } else if (oldMainModel &&
+                   (i->first->getAlignmentReference() == oldMainModel)) {
+
             alignModel(i->first);
         }
     }
 
+    if (oldMainModel) {
+        oldMainModel->aboutToDelete();
+        emit modelAboutToBeDeleted(oldMainModel);
+    }
+
+    if (m_autoAlignment) {
+        alignModel(m_mainModel);
+    }
+
     emit mainModelChanged(m_mainModel);
 
-    // we already emitted modelAboutToBeDeleted for this
     delete oldMainModel;
 }
 
 void
-Document::addDerivedModel(TransformId transform,
-                          Model *inputModel,
-                          const PluginTransformer::ExecutionContext &context,
-                          Model *outputModelToAdd,
-                          QString configurationXml)
+Document::addDerivedModel(const Transform &transform,
+                          const ModelTransformer::Input &input,
+                          Model *outputModelToAdd)
 {
     if (m_models.find(outputModelToAdd) != m_models.end()) {
 	std::cerr << "WARNING: Document::addDerivedModel: Model already added"
@@ -360,16 +408,17 @@
 	return;
     }
 
-//    std::cerr << "Document::addDerivedModel: source is " << inputModel << " \"" << inputModel->objectName().toStdString() << "\"" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::addDerivedModel: source is " << input.getModel() << " \"" << input.getModel()->objectName().toStdString() << "\"" << std::endl;
+#endif
 
     ModelRecord rec;
-    rec.source = inputModel;
+    rec.source = input.getModel();
+    rec.channel = input.getChannel();
     rec.transform = transform;
-    rec.context = context;
-    rec.configurationXml = configurationXml;
     rec.refcount = 0;
 
-    outputModelToAdd->setSourceModel(inputModel);
+    outputModelToAdd->setSourceModel(input.getModel());
 
     m_models[outputModelToAdd] = rec;
 
@@ -388,7 +437,6 @@
 
     ModelRecord rec;
     rec.source = 0;
-    rec.transform = "";
     rec.refcount = 0;
 
     m_models[model] = rec;
@@ -399,29 +447,41 @@
 }
 
 Model *
-Document::addDerivedModel(TransformId transform,
-                          Model *inputModel,
-                          const PluginTransformer::ExecutionContext &context,
-                          QString configurationXml)
+Document::addDerivedModel(const Transform &transform,
+                          const ModelTransformer::Input &input,
+                          QString &message)
 {
     Model *model = 0;
 
     for (ModelMap::iterator i = m_models.begin(); i != m_models.end(); ++i) {
 	if (i->second.transform == transform &&
-	    i->second.source == inputModel && 
-            i->second.context == context &&
-            i->second.configurationXml == configurationXml) {
+	    i->second.source == input.getModel() && 
+            i->second.channel == input.getChannel()) {
 	    return i->first;
 	}
     }
 
     model = ModelTransformerFactory::getInstance()->transform
-	(transform, inputModel, context, configurationXml);
+        (transform, input, message);
+
+    // The transform we actually used was presumably identical to the
+    // one asked for, except that the version of the plugin may
+    // differ.  It's possible that the returned message contains a
+    // warning about this; that doesn't concern us here, but we do
+    // need to ensure that the transform we remember is correct for
+    // what was actually applied, with the current plugin version.
+
+    Transform applied = transform;
+    applied.setPluginVersion
+        (TransformFactory::getInstance()->
+         getDefaultTransformFor(transform.getIdentifier(),
+                                lrintf(transform.getSampleRate()))
+         .getPluginVersion());
 
     if (!model) {
-	std::cerr << "WARNING: Document::addDerivedModel: no output model for transform " << transform.toStdString() << std::endl;
+	std::cerr << "WARNING: Document::addDerivedModel: no output model for transform " << transform.getIdentifier().toStdString() << std::endl;
     } else {
-	addDerivedModel(transform, inputModel, context, model, configurationXml);
+	addDerivedModel(applied, input, model);
     }
 
     return model;
@@ -474,8 +534,8 @@
 		      << "their source fields appropriately" << std::endl;
 	}
 
+        model->aboutToDelete();
 	emit modelAboutToBeDeleted(model);
-        model->aboutToDelete();
 	m_models.erase(model);
 	delete model;
     }
@@ -494,7 +554,9 @@
 
 	if (force) {
 
+#ifdef DEBUG_DOCUMENT
 	    std::cerr << "(force flag set -- deleting from all views)" << std::endl;
+#endif
 
 	    for (std::set<View *>::iterator j = m_layerViewMap[layer].begin();
 		 j != m_layerViewMap[layer].end(); ++j) {
@@ -581,10 +643,12 @@
 {
     Model *model = layer->getModel();
     if (!model) {
-//	std::cerr << "Document::addLayerToView: Layer (\""
-//                  << layer->objectName().toStdString()
-//                  << "\") with no model being added to view: "
-//                  << "normally you want to set the model first" << std::endl;
+#ifdef DEBUG_DOCUMENT
+	std::cerr << "Document::addLayerToView: Layer (\""
+                  << layer->objectName().toStdString()
+                  << "\") with no model being added to view: "
+                  << "normally you want to set the model first" << std::endl;
+#endif
     } else {
 	if (model != m_mainModel &&
 	    m_models.find(model) == m_models.end()) {
@@ -665,7 +729,7 @@
 }
 
 std::vector<Model *>
-Document::getTransformerInputModels()
+Document::getTransformInputModels()
 {
     std::vector<Model *> models;
 
@@ -690,9 +754,28 @@
 }
 
 bool
+Document::isKnownModel(const Model *model) const
+{
+    if (model == m_mainModel) return true;
+    return (m_models.find(const_cast<Model *>(model)) != m_models.end());
+}
+
+TransformId
+Document::getAlignmentTransformName()
+{
+    QSettings settings;
+    settings.beginGroup("Alignment");
+    TransformId id =
+        settings.value("transform-id",
+                       "vamp:match-vamp-plugin:match:path").toString();
+    settings.endGroup();
+    return id;
+}
+
+bool
 Document::canAlign() 
 {
-    TransformId id = "vamp:match-vamp-plugin:match:path";
+    TransformId id = getAlignmentTransformName();
     TransformFactory *factory = TransformFactory::getInstance();
     return factory->haveTransform(id);
 }
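
Because getAlignmentTransformName() above reads its value from QSettings, the transform that canAlign() checks for can be overridden without a rebuild; note that alignModel() itself still hard-codes the MATCH transform id at this point (see the //!!! configure note below). For example (a sketch; the transform id shown is hypothetical):

    QSettings settings;                              // requires <QSettings>
    settings.beginGroup("Alignment");
    settings.setValue("transform-id", "vamp:my-align-plugin:align:path");   // hypothetical id
    settings.endGroup();
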
@@ -700,14 +783,27 @@
 void
 Document::alignModel(Model *model)
 {
-    if (!m_mainModel || model == m_mainModel) return;
+    if (!m_mainModel) return;
 
     RangeSummarisableTimeValueModel *rm = 
         dynamic_cast<RangeSummarisableTimeValueModel *>(model);
     if (!rm) return;
 
-    if (rm->getAlignmentReference() == m_mainModel) return;
+    if (rm->getAlignmentReference() == m_mainModel) {
+        std::cerr << "Document::alignModel: model " << rm << " is already aligned to main model " << m_mainModel << std::endl;
+        return;
+    }
     
+    if (model == m_mainModel) {
+        // The reference has an empty alignment to itself.  This makes
+        // it possible to distinguish between the reference and any
+        // unaligned model just by looking at the model itself,
+        // without also knowing what the main model is.
+        std::cerr << "Document::alignModel(" << model << "): is main model, setting appropriately" << std::endl;
+        rm->setAlignment(new AlignmentModel(model, model, 0, 0));
+        return;
+    }
+
     // This involves creating three new models:
 
     // 1. an AggregateWaveModel to provide the mixdowns of the main
@@ -737,21 +833,26 @@
 
     Model *aggregate = new AggregateWaveModel(components);
 
-    TransformId id = "vamp:match-vamp-plugin:match:path";
+    TransformId id = "vamp:match-vamp-plugin:match:path"; //!!! configure
     
-    ModelTransformerFactory *factory = ModelTransformerFactory::getInstance();
+    TransformFactory *tf = TransformFactory::getInstance();
 
-    PluginTransformer::ExecutionContext context =
-        factory->getDefaultContextForTransformer(id, aggregate);
-    context.stepSize = context.blockSize/2;
+    Transform transform = tf->getDefaultTransformFor
+        (id, aggregate->getSampleRate());
 
-    QString args = "<plugin param-serialise=\"1\"/>";
+    transform.setStepSize(transform.getBlockSize()/2);
+    transform.setParameter("serialise", 1);
 
-    Model *transformOutput = factory->transform(id, aggregate, context, args);
+    std::cerr << "Document::alignModel: Alignment transform step size " << transform.getStepSize() << ", block size " << transform.getBlockSize() << std::endl;
+
+    ModelTransformerFactory *mtf = ModelTransformerFactory::getInstance();
+
+    QString message;
+    Model *transformOutput = mtf->transform(transform, aggregate, message);
 
     if (!transformOutput) {
-        context.stepSize = 0;
-        transformOutput = factory->transform(id, aggregate, context, args);
+        transform.setStepSize(0);
+        transformOutput = mtf->transform(transform, aggregate, message);
     }
 
     SparseTimeValueModel *path = dynamic_cast<SparseTimeValueModel *>
@@ -759,6 +860,7 @@
 
     if (!path) {
         std::cerr << "Document::alignModel: ERROR: Failed to create alignment path (no MATCH plugin?)" << std::endl;
+        emit alignmentFailed(id, message);
         delete transformOutput;
         delete aggregate;
         return;
@@ -776,6 +878,7 @@
     for (ModelMap::iterator i = m_models.begin(); i != m_models.end(); ++i) {
         alignModel(i->first);
     }
+    alignModel(m_mainModel);
 }
 
 Document::AddLayerCommand::AddLayerCommand(Document *d,
@@ -791,7 +894,9 @@
 
 Document::AddLayerCommand::~AddLayerCommand()
 {
-//    std::cerr << "Document::AddLayerCommand::~AddLayerCommand" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::AddLayerCommand::~AddLayerCommand" << std::endl;
+#endif
     if (!m_added) {
 	m_d->deleteLayer(m_layer);
     }
@@ -839,7 +944,9 @@
 
 Document::RemoveLayerCommand::~RemoveLayerCommand()
 {
-//    std::cerr << "Document::RemoveLayerCommand::~RemoveLayerCommand" << std::endl;
+#ifdef DEBUG_DOCUMENT
+    std::cerr << "Document::RemoveLayerCommand::~RemoveLayerCommand" << std::endl;
+#endif
     if (!m_added) {
 	m_d->deleteLayer(m_layer);
     }
@@ -926,7 +1033,7 @@
         bool writeModel = true;
         bool haveDerivation = false;
 
-        if (rec.source && rec.transform != "") {
+        if (rec.source && rec.transform.getIdentifier() != "") {
             haveDerivation = true;
         } 
 
@@ -943,33 +1050,8 @@
         }
 
 	if (haveDerivation) {
-
-            QString extentsAttributes;
-            if (rec.context.startFrame != 0 ||
-                rec.context.duration != 0) {
-                extentsAttributes = QString("startFrame=\"%1\" duration=\"%2\" ")
-                    .arg(rec.context.startFrame)
-                    .arg(rec.context.duration);
-            }
-	    
-	    out << indent;
-	    out << QString("  <derivation source=\"%1\" model=\"%2\" channel=\"%3\" domain=\"%4\" stepSize=\"%5\" blockSize=\"%6\" %7windowType=\"%8\" transform=\"%9\"")
-		.arg(XmlExportable::getObjectExportId(rec.source))
-		.arg(XmlExportable::getObjectExportId(i->first))
-                .arg(rec.context.channel)
-                .arg(rec.context.domain)
-                .arg(rec.context.stepSize)
-                .arg(rec.context.blockSize)
-                .arg(extentsAttributes)
-                .arg(int(rec.context.windowType))
-		.arg(XmlExportable::encodeEntities(rec.transform));
-
-            if (rec.configurationXml != "") {
-                out << ">\n    " + indent + rec.configurationXml
-                    + "\n" + indent + "  </derivation>\n";
-            } else {
-                out << "/>\n";
-            }
+            writeBackwardCompatibleDerivation(out, indent + "  ",
+                                              i->first, rec);
 	}
 
         //!!! We should probably own the PlayParameterRepository
@@ -992,4 +1074,82 @@
     out << indent + "</data>\n";
 }
 
+void
+Document::writeBackwardCompatibleDerivation(QTextStream &out, QString indent,
+                                            Model *targetModel,
+                                            const ModelRecord &rec) const
+{
+    // There is a lot of redundancy in the XML we output here, because
+    // we want it to work with older SV session file reading code as
+    // well.
+    //
+    // Formerly, a transform was described using a derivation element
+    // which set out the source and target models, execution context
+    // (step size, input channel etc) and transform id, containing a
+    // plugin element which set out the transform parameters and so
+    // on.  (The plugin element came from a "configurationXml" string
+    // obtained from PluginXml.)
+    // 
+    // This has been replaced by a derivation element setting out the
+    // source and target models and input channel, containing a
+    // transform element which sets out everything in the Transform.
+    //
+    // In order to retain compatibility with older SV code, however,
+    // we have to write out the same stuff into the derivation as
+    // before, and manufacture an appropriate plugin element as well
+    // as the transform element.  In order that newer code knows it's
+    // dealing with a newer format, we will also write an attribute
+    // 'type="transform"' in the derivation element.
 
+    const Transform &transform = rec.transform;
+
+    // Just for reference, this is what we would write if we didn't
+    // have to be backward compatible:
+    //
+    //    out << indent
+    //        << QString("<derivation type=\"transform\" source=\"%1\" "
+    //                   "model=\"%2\" channel=\"%3\">\n")
+    //        .arg(XmlExportable::getObjectExportId(rec.source))
+    //        .arg(XmlExportable::getObjectExportId(targetModel))
+    //        .arg(rec.channel);
+    //
+    //    transform.toXml(out, indent + "  ");
+    //
+    //    out << indent << "</derivation>\n";
+    // 
+    // Unfortunately, we can't just do that.  So we do this...
+
+    QString extentsAttributes;
+    if (transform.getStartTime() != RealTime::zeroTime ||
+        transform.getDuration() != RealTime::zeroTime) {
+        extentsAttributes = QString("startFrame=\"%1\" duration=\"%2\" ")
+            .arg(RealTime::realTime2Frame(transform.getStartTime(),
+                                          targetModel->getSampleRate()))
+            .arg(RealTime::realTime2Frame(transform.getDuration(),
+                                          targetModel->getSampleRate()));
+    }
+	    
+    out << indent;
+    out << QString("<derivation type=\"transform\" source=\"%1\" "
+                   "model=\"%2\" channel=\"%3\" domain=\"%4\" "
+                   "stepSize=\"%5\" blockSize=\"%6\" %7windowType=\"%8\" "
+                   "transform=\"%9\">\n")
+        .arg(XmlExportable::getObjectExportId(rec.source))
+        .arg(XmlExportable::getObjectExportId(targetModel))
+        .arg(rec.channel)
+        .arg(TransformFactory::getInstance()->getTransformInputDomain
+             (transform.getIdentifier()))
+        .arg(transform.getStepSize())
+        .arg(transform.getBlockSize())
+        .arg(extentsAttributes)
+        .arg(int(transform.getWindowType()))
+        .arg(XmlExportable::encodeEntities(transform.getIdentifier()));
+
+    transform.toXml(out, indent + "  ");
+    
+    out << indent << "  "
+        << TransformFactory::getInstance()->getPluginConfigurationXml(transform);
+
+    out << indent << "</derivation>\n";
+}
+
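
The net effect of writeBackwardCompatibleDerivation() is a derivation element carrying both the old execution-context attributes and the new nested transform description, roughly of this shape (illustrative only: the attribute values are invented, and the contents of the nested transform and plugin elements come from Transform::toXml and getPluginConfigurationXml, which are not shown here):

    <derivation type="transform" source="2" model="7" channel="-1" domain="0"
                stepSize="512" blockSize="1024" windowType="3"
                transform="vamp:example-plugin:example:output">
      <transform ... />   <!-- full Transform serialisation, read by newer code -->
      <plugin ... />      <!-- old-style plugin configuration, read by older code -->
    </derivation>
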
--- a/framework/Document.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/Document.h	Wed Feb 27 10:32:45 2008 +0000
@@ -18,7 +18,6 @@
 
 #include "layer/LayerFactory.h"
 #include "plugin/transform/Transform.h"
-#include "plugin/transform/PluginTransformer.h"//!!!
 #include "plugin/transform/ModelTransformer.h"
 #include "base/Command.h"
 
@@ -114,10 +113,8 @@
      * running the transform and associating the resulting model with
      * the new layer.
      */
-    Layer *createDerivedLayer(TransformId,
-                              Model *inputModel, 
-                              const PluginTransformer::ExecutionContext &context,
-                              QString configurationXml);
+    Layer *createDerivedLayer(const Transform &,
+                              const ModelTransformer::Input &);
 
     /**
      * Delete the given layer, and also its associated model if no
@@ -144,27 +141,26 @@
      */
     const WaveFileModel *getMainModel() const { return m_mainModel; }
 
-    std::vector<Model *> getTransformerInputModels();
+    std::vector<Model *> getTransformInputModels();
+
+    bool isKnownModel(const Model *) const;
 
     /**
      * Add a derived model associated with the given transform,
      * running the transform and returning the resulting model.
      */
-    Model *addDerivedModel(TransformId transform,
-                           Model *inputModel,
-                           const PluginTransformer::ExecutionContext &context,
-                           QString configurationXml);
+    Model *addDerivedModel(const Transform &transform,
+                           const ModelTransformer::Input &input,
+                           QString &returnedMessage);
 
     /**
      * Add a derived model associated with the given transform.  This
      * is necessary to register any derived model that was not created
      * by the document using createDerivedModel or createDerivedLayer.
      */
-    void addDerivedModel(TransformId,
-                         Model *inputModel,
-                         const PluginTransformer::ExecutionContext &context,
-                         Model *outputModelToAdd,
-                         QString configurationXml);
+    void addDerivedModel(const Transform &transform,
+                         const ModelTransformer::Input &input,
+                         Model *outputModelToAdd);
 
     /**
      * Add an imported (non-derived, non-main) model.  This is
@@ -232,8 +228,13 @@
     void mainModelChanged(WaveFileModel *); // emitted after modelAdded
     void modelAboutToBeDeleted(Model *);
 
-    void modelGenerationFailed(QString transformName);
-    void modelRegenerationFailed(QString layerName, QString transformName);
+    void modelGenerationFailed(QString transformName, QString message);
+    void modelGenerationWarning(QString transformName, QString message);
+    void modelRegenerationFailed(QString layerName, QString transformName,
+                                 QString message);
+    void modelRegenerationWarning(QString layerName, QString transformName,
+                                  QString message);
+    void alignmentFailed(QString transformName, QString message);
 
 protected:
     void releaseModel(Model *model);
@@ -267,10 +268,14 @@
 	// transform name is set but source is NULL, then there was a
 	// transform involved but the (target) model has been modified
 	// since being generated from it.
+        
+        // This does not use ModelTransformer::Input, because it would
+        // be confusing to have Input objects hanging around with NULL
+        // models in them.
+
 	const Model *source;
-	TransformId transform;
-        PluginTransformer::ExecutionContext context;
-        QString configurationXml;
+        int channel;
+        Transform transform;
 
 	// Count of the number of layers using this model.
 	int refcount;
@@ -292,7 +297,7 @@
     protected:
 	Document *m_d;
 	View *m_view; // I don't own this
-	Layer *m_layer; // Document owns this, but I determine its lifespans
+	Layer *m_layer; // Document owns this, but I determine its lifespan
 	QString m_name;
 	bool m_added;
     };
@@ -322,6 +327,10 @@
     void removeFromLayerViewMap(Layer *, View *);
 
     QString getUniqueLayerName(QString candidate);
+    void writeBackwardCompatibleDerivation(QTextStream &, QString, Model *,
+                                           const ModelRecord &) const;
+
+    static TransformId getAlignmentTransformName();
     
     /**
      * And these are the layers.  We also control the lifespans of
--- a/framework/MainWindowBase.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/MainWindowBase.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -278,6 +278,28 @@
     if (m_paneStack) currentPane = m_paneStack->getCurrentPane();
     if (currentPane) currentLayer = currentPane->getSelectedLayer();
 
+    bool havePrevPane = false, haveNextPane = false;
+    bool havePrevLayer = false, haveNextLayer = false;
+
+    if (currentPane) {
+        for (int i = 0; i < m_paneStack->getPaneCount(); ++i) {
+            if (m_paneStack->getPane(i) == currentPane) {
+                if (i > 0) havePrevPane = true;
+                if (i < m_paneStack->getPaneCount()-1) haveNextPane = true;
+                break;
+            }
+        }
+        if (currentLayer) {
+            for (int i = 0; i < currentPane->getLayerCount(); ++i) {
+                if (currentPane->getLayer(i) == currentLayer) {
+                    if (i > 0) havePrevLayer = true;
+                    if (i < currentPane->getLayerCount()-1) haveNextLayer = true;
+                    break;
+                }
+            }
+        }
+    }        
+
     bool haveCurrentPane =
         (currentPane != 0);
     bool haveCurrentLayer =
@@ -322,7 +344,7 @@
     emit canPlay(havePlayTarget);
     emit canFfwd(true);
     emit canRewind(true);
-    emit canPaste(haveCurrentEditableLayer && haveClipboardContents);
+    emit canPaste(haveClipboardContents);
     emit canInsertInstant(haveCurrentPane);
     emit canInsertInstantsAtBoundaries(haveCurrentPane && haveSelection);
     emit canRenumberInstants(haveCurrentTimeInstantsLayer && haveSelection);
@@ -330,6 +352,10 @@
     emit canClearSelection(haveSelection);
     emit canEditSelection(haveSelection && haveCurrentEditableLayer);
     emit canSave(m_sessionFile != "" && m_documentModified);
+    emit canSelectPreviousPane(havePrevPane);
+    emit canSelectNextPane(haveNextPane);
+    emit canSelectPreviousLayer(havePrevLayer);
+    emit canSelectNextLayer(haveNextLayer);
 }
 
 void
@@ -423,9 +449,18 @@
     }
 
     Model *prevPlaybackModel = m_viewManager->getPlaybackModel();
-    int frame = m_playSource->getCurrentPlayingFrame();
 
-    std::cerr << "playing frame (in ref model) = " << frame << std::endl;
+    // What we want here is not the currently playing frame (unless we
+    // are about to clear out the audio playback buffers -- which may
+    // or may not be possible, depending on the audio driver).  What
+    // we want is the frame that was last committed to the soundcard
+    // buffers, as the audio driver will continue playing up to that
+    // frame before switching to whichever frame we eventually request,
+    // regardless of our efforts.
+
+    int frame = m_playSource->getCurrentBufferedFrame();
+
+//    std::cerr << "currentPaneChanged: current frame (in ref model) = " << frame << std::endl;
 
     View::ModelSet soloModels = p->getModels();
     
@@ -462,11 +497,6 @@
     m_playSource->setSoloModelSet(soloModels);
 
     if (a && b && (a != b)) {
-/*!!!
-        int rframe = a->alignToReference(frame);
-        int bframe = b->alignFromReference(rframe);
-        if (m_playSource->isPlaying()) m_playSource->play(bframe);
-*/
         if (m_playSource->isPlaying()) m_playSource->play(frame);
     }
 }
@@ -546,7 +576,7 @@
 
     for (MultiSelection::SelectionList::iterator i = selections.begin();
          i != selections.end(); ++i) {
-        layer->copy(*i, clipboard);
+        layer->copy(currentPane, *i, clipboard);
         layer->deleteSelection(*i);
     }
 
@@ -569,7 +599,7 @@
 
     for (MultiSelection::SelectionList::iterator i = selections.begin();
          i != selections.end(); ++i) {
-        layer->copy(*i, clipboard);
+        layer->copy(currentPane, *i, clipboard);
     }
 }
 
@@ -579,30 +609,38 @@
     Pane *currentPane = m_paneStack->getCurrentPane();
     if (!currentPane) return;
 
-    //!!! if we have no current layer, we should create one of the most
-    // appropriate type
-
     Layer *layer = currentPane->getSelectedLayer();
-    if (!layer) return;
 
     Clipboard &clipboard = m_viewManager->getClipboard();
-    Clipboard::PointList contents = clipboard.getPoints();
-/*
-    long minFrame = 0;
-    bool have = false;
-    for (int i = 0; i < contents.size(); ++i) {
-        if (!contents[i].haveFrame()) continue;
-        if (!have || contents[i].getFrame() < minFrame) {
-            minFrame = contents[i].getFrame();
-            have = true;
+//    Clipboard::PointList contents = clipboard.getPoints();
+
+    bool inCompound = false;
+
+    if (!layer || !layer->isLayerEditable()) {
+        
+        CommandHistory::getInstance()->startCompoundOperation
+            (tr("Paste"), true);
+
+        // no suitable current layer: create one of the most
+        // appropriate sort
+        LayerFactory::LayerType type =
+            LayerFactory::getInstance()->getLayerTypeForClipboardContents(clipboard);
+        layer = m_document->createEmptyLayer(type);
+
+        if (!layer) {
+            CommandHistory::getInstance()->endCompoundOperation();
+            return;
         }
+
+        m_document->addLayerToView(currentPane, layer);
+        m_paneStack->setCurrentLayer(currentPane, layer);
+
+        inCompound = true;
     }
 
-    long frameOffset = long(m_viewManager->getGlobalCentreFrame()) - minFrame;
+    layer->paste(currentPane, clipboard, 0, true);
 
-    layer->paste(clipboard, frameOffset);
-*/
-    layer->paste(clipboard, 0, true);
+    if (inCompound) CommandHistory::getInstance()->endCompoundOperation();
 }
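
The reworked paste() above leans on the compound-command mechanism so that, when a new layer has to be created to paste into, a single Undo removes both the pasted material and the layer. The essential shape, including the early-exit path, is (a sketch only):

    CommandHistory::getInstance()->startCompoundOperation(tr("Paste"), true);

    Layer *layer = m_document->createEmptyLayer(type);     // type chosen from the clipboard contents
    if (!layer) {
        CommandHistory::getInstance()->endCompoundOperation();  // close the compound on every path
        return;
    }
    m_document->addLayerToView(currentPane, layer);
    layer->paste(currentPane, clipboard, 0, true);

    CommandHistory::getInstance()->endCompoundOperation();
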
 
 void
@@ -661,6 +699,8 @@
         return;
     }
 
+    frame = pane->alignFromReference(frame);
+
     Layer *layer = dynamic_cast<TimeInstantLayer *>
         (pane->getSelectedLayer());
 
@@ -697,7 +737,7 @@
             SparseOneDimensionalModel::EditCommand *command =
                 new SparseOneDimensionalModel::EditCommand(sodm, tr("Add Point"));
 
-            if (m_labeller->actingOnPrevPoint()) {
+            if (m_labeller->requiresPrevPoint()) {
 
                 SparseOneDimensionalModel::PointList prevPoints =
                     sodm->getPreviousPoints(frame);
@@ -712,14 +752,14 @@
 
                 m_labeller->setSampleRate(sodm->getSampleRate());
 
-                if (havePrevPoint) {
+                if (m_labeller->actingOnPrevPoint()) {
                     command->deletePoint(prevPoint);
                 }
 
                 m_labeller->label<SparseOneDimensionalModel::Point>
                     (point, havePrevPoint ? &prevPoint : 0);
 
-                if (havePrevPoint) {
+                if (m_labeller->actingOnPrevPoint()) {
                     command->addPoint(prevPoint);
                 }
             }
@@ -766,7 +806,7 @@
 MainWindowBase::FileOpenStatus
 MainWindowBase::open(QString fileOrUrl, AudioFileOpenMode mode)
 {
-    return open(FileSource(fileOrUrl, true), mode);
+    return open(FileSource(fileOrUrl, FileSource::ProgressDialog), mode);
 }
 
 MainWindowBase::FileOpenStatus
@@ -1008,7 +1048,8 @@
     for (PlaylistFileReader::Playlist::const_iterator i = playlist.begin();
          i != playlist.end(); ++i) {
 
-        FileOpenStatus status = openAudio(FileSource(*i, true), mode);
+        FileOpenStatus status = openAudio
+            (FileSource(*i, FileSource::ProgressDialog), mode);
 
         if (status == FileOpenCancelled) {
             return FileOpenCancelled;
@@ -1059,6 +1100,12 @@
         }
         
         SVFileReader reader(m_document, callback, source.getLocation());
+        connect
+            (&reader, SIGNAL(modelRegenerationFailed(QString, QString, QString)),
+             this, SLOT(modelRegenerationFailed(QString, QString, QString)));
+        connect
+            (&reader, SIGNAL(modelRegenerationWarning(QString, QString, QString)),
+             this, SLOT(modelRegenerationWarning(QString, QString, QString)));
         reader.setCurrentPane(pane);
         
         QXmlInputSource inputSource(&file);
@@ -1078,6 +1125,8 @@
             registerLastOpenedFilePath(FileFinder::LayerFile, path); // for file dialog
         }
 
+        return FileOpenSucceeded;
+
     } else {
         
         try {
@@ -1094,6 +1143,8 @@
                 if (newLayer) {
 
                     m_document->addLayerToView(pane, newLayer);
+                    m_paneStack->setCurrentLayer(pane, newLayer);
+
                     m_recentFiles.addFile(source.getLocation());
                     
                     if (!source.isRemote()) {
@@ -1101,7 +1152,7 @@
                             (FileFinder::LayerFile,
                              path); // for file dialog
                     }
-                    
+
                     return FileOpenSucceeded;
                 }
             }
@@ -1112,7 +1163,6 @@
         }
     }
     
-    source.setLeaveLocalFile(true);
     return FileOpenFailed;
 }
 
@@ -1169,7 +1219,7 @@
 MainWindowBase::FileOpenStatus
 MainWindowBase::openSessionFile(QString fileOrUrl)
 {
-    return openSession(FileSource(fileOrUrl, true));
+    return openSession(FileSource(fileOrUrl, FileSource::ProgressDialog));
 }
 
 MainWindowBase::FileOpenStatus
@@ -1192,6 +1242,12 @@
     m_viewManager->clearSelections();
 
     SVFileReader reader(m_document, callback, source.getLocation());
+    connect
+        (&reader, SIGNAL(modelRegenerationFailed(QString, QString, QString)),
+         this, SLOT(modelRegenerationFailed(QString, QString, QString)));
+    connect
+        (&reader, SIGNAL(modelRegenerationWarning(QString, QString, QString)),
+         this, SLOT(modelRegenerationWarning(QString, QString, QString)));
     QXmlInputSource inputSource(&bzFile);
     reader.parse(inputSource);
     
@@ -1282,10 +1338,16 @@
     connect(m_document, SIGNAL(modelAboutToBeDeleted(Model *)),
 	    this, SLOT(modelAboutToBeDeleted(Model *)));
 
-    connect(m_document, SIGNAL(modelGenerationFailed(QString)),
-            this, SLOT(modelGenerationFailed(QString)));
-    connect(m_document, SIGNAL(modelRegenerationFailed(QString, QString)),
-            this, SLOT(modelRegenerationFailed(QString, QString)));
+    connect(m_document, SIGNAL(modelGenerationFailed(QString, QString)),
+            this, SLOT(modelGenerationFailed(QString, QString)));
+    connect(m_document, SIGNAL(modelGenerationWarning(QString, QString)),
+            this, SLOT(modelGenerationWarning(QString, QString)));
+    connect(m_document, SIGNAL(modelRegenerationFailed(QString, QString, QString)),
+            this, SLOT(modelRegenerationFailed(QString, QString, QString)));
+    connect(m_document, SIGNAL(modelRegenerationWarning(QString, QString, QString)),
+            this, SLOT(modelRegenerationWarning(QString, QString, QString)));
+    connect(m_document, SIGNAL(alignmentFailed(QString, QString)),
+            this, SLOT(alignmentFailed(QString, QString)));
 }
 
 bool
@@ -1545,6 +1607,7 @@
     int frame = m_viewManager->getPlaybackFrame();
     ++frame;
 
+    Pane *pane = m_paneStack->getCurrentPane();
     Layer *layer = getSnapLayer();
     size_t sr = getMainModel()->getSampleRate();
 
@@ -1559,8 +1622,10 @@
     } else {
 
         size_t resolution = 0;
-        if (!layer->snapToFeatureFrame(m_paneStack->getCurrentPane(),
-                                       frame, resolution, Layer::SnapRight)) {
+        if (layer->snapToFeatureFrame(m_paneStack->getCurrentPane(),
+                                      frame, resolution, Layer::SnapRight)) {
+            if (pane) frame = pane->alignToReference(frame);
+        } else {
             frame = getMainModel()->getEndFrame();
         }
     }
@@ -1596,6 +1661,7 @@
     int frame = m_viewManager->getPlaybackFrame();
     if (frame > 0) --frame;
 
+    Pane *pane = m_paneStack->getCurrentPane();
     Layer *layer = getSnapLayer();
     size_t sr = getMainModel()->getSampleRate();
     
@@ -1622,8 +1688,11 @@
     } else {
 
         size_t resolution = 0;
-        if (!layer->snapToFeatureFrame(m_paneStack->getCurrentPane(),
-                                       frame, resolution, Layer::SnapLeft)) {
+        if (layer->snapToFeatureFrame(m_paneStack->getCurrentPane(),
+                                      frame, resolution, Layer::SnapLeft)) {
+            
+            if (pane) frame = pane->alignToReference(frame);
+        } else {
             frame = getMainModel()->getStartFrame();
         }
     }
@@ -1812,6 +1881,90 @@
 }
 
 void
+MainWindowBase::previousPane()
+{
+    if (!m_paneStack) return;
+
+    Pane *currentPane = m_paneStack->getCurrentPane();
+    if (!currentPane) return;
+
+    for (int i = 0; i < m_paneStack->getPaneCount(); ++i) {
+        if (m_paneStack->getPane(i) == currentPane) {
+            if (i == 0) return;
+            m_paneStack->setCurrentPane(m_paneStack->getPane(i-1));
+            updateMenuStates();
+            return;
+        }
+    }
+}
+
+void
+MainWindowBase::nextPane()
+{
+    if (!m_paneStack) return;
+
+    Pane *currentPane = m_paneStack->getCurrentPane();
+    if (!currentPane) return;
+
+    for (int i = 0; i < m_paneStack->getPaneCount(); ++i) {
+        if (m_paneStack->getPane(i) == currentPane) {
+            if (i == m_paneStack->getPaneCount()-1) return;
+            m_paneStack->setCurrentPane(m_paneStack->getPane(i+1));
+            updateMenuStates();
+            return;
+        }
+    }
+}
+
+void
+MainWindowBase::previousLayer()
+{
+    //!!! Not right -- pane lists layers in stacking order
+
+    if (!m_paneStack) return;
+
+    Pane *currentPane = m_paneStack->getCurrentPane();
+    if (!currentPane) return;
+
+    Layer *currentLayer = currentPane->getSelectedLayer();
+    if (!currentLayer) return;
+
+    for (int i = 0; i < currentPane->getLayerCount(); ++i) {
+        if (currentPane->getLayer(i) == currentLayer) {
+            if (i == 0) return;
+            m_paneStack->setCurrentLayer(currentPane,
+                                         currentPane->getLayer(i-1));
+            updateMenuStates();
+            return;
+        }
+    }
+}
+
+void
+MainWindowBase::nextLayer()
+{
+    //!!! Not right -- pane lists layers in stacking order
+
+    if (!m_paneStack) return;
+
+    Pane *currentPane = m_paneStack->getCurrentPane();
+    if (!currentPane) return;
+
+    Layer *currentLayer = currentPane->getSelectedLayer();
+    if (!currentLayer) return;
+
+    for (int i = 0; i < currentPane->getLayerCount(); ++i) {
+        if (currentPane->getLayer(i) == currentLayer) {
+            if (i == currentPane->getLayerCount()-1) return;
+            m_paneStack->setCurrentLayer(currentPane,
+                                         currentPane->getLayer(i+1));
+            updateMenuStates();
+            return;
+        }
+    }
+}
+
+void
 MainWindowBase::playbackFrameChanged(unsigned long frame)
 {
     if (!(m_playSource && m_playSource->isPlaying()) || !getMainModel()) return;
--- a/framework/MainWindowBase.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/MainWindowBase.h	Wed Feb 27 10:32:45 2008 +0000
@@ -132,6 +132,10 @@
     void canSpeedUpPlayback(bool);
     void canSlowDownPlayback(bool);
     void canChangePlaybackSpeed(bool);
+    void canSelectPreviousPane(bool);
+    void canSelectNextPane(bool);
+    void canSelectPreviousLayer(bool);
+    void canSelectNextLayer(bool);
     void canSave(bool);
 
 public slots:
@@ -166,6 +170,11 @@
     virtual void deleteCurrentPane();
     virtual void deleteCurrentLayer();
 
+    virtual void previousPane();
+    virtual void nextPane();
+    virtual void previousLayer();
+    virtual void nextLayer();
+
     virtual void playLoopToggled();
     virtual void playSelectionToggled();
     virtual void playSoloToggled();
@@ -213,8 +222,11 @@
     virtual void updateMenuStates();
     virtual void updateDescriptionLabel() = 0;
 
-    virtual void modelGenerationFailed(QString) = 0;
-    virtual void modelRegenerationFailed(QString, QString) = 0;
+    virtual void modelGenerationFailed(QString, QString) = 0;
+    virtual void modelGenerationWarning(QString, QString) = 0;
+    virtual void modelRegenerationFailed(QString, QString, QString) = 0;
+    virtual void modelRegenerationWarning(QString, QString, QString) = 0;
+    virtual void alignmentFailed(QString, QString) = 0;
 
     virtual void rightButtonMenuRequested(Pane *, QPoint point) = 0;
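The failure callbacks above each gain an extra string argument, and the warning and alignment variants are new, so concrete window classes must provide the wider overloads. A sketch of what one such override might look like, assuming a Qt message box is the desired presentation (the class name and message wording are illustrative, not taken from the source; requires <QMessageBox>):

    void MainWindow::modelRegenerationFailed(QString layerName,
                                             QString transformName,
                                             QString message)
    {
        // Report which layer and transform failed, along with the reason
        // supplied by the transformer, in a standard warning dialog.
        QMessageBox::warning
            (this, tr("Failed to regenerate layer"),
             tr("Layer \"%1\" could not be regenerated from transform \"%2\":\n%3")
             .arg(layerName).arg(transformName).arg(message));
    }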
 
--- a/framework/SVFileReader.cpp	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/SVFileReader.cpp	Wed Feb 27 10:32:45 2008 +0000
@@ -32,6 +32,8 @@
 #include "data/model/TextModel.h"
 #include "data/model/ImageModel.h"
 
+#include "plugin/transform/TransformFactory.h"
+
 #include "view/Pane.h"
 
 #include "Document.h"
@@ -53,6 +55,7 @@
     m_currentDerivedModel(0),
     m_currentDerivedModelId(-1),
     m_currentPlayParameters(0),
+    m_currentTransformSource(0),
     m_datasetSeparator(" "),
     m_inRow(false),
     m_inLayer(false),
@@ -136,6 +139,11 @@
     // row
     // view
     // window
+    // plugin
+    // transform
+    // selections
+    // selection
+    // measurement
 
     if (name == "sv") {
 
@@ -212,6 +220,14 @@
 
         ok = readMeasurement(attributes);
 
+    } else if (name == "transform") {
+        
+        ok = readTransform(attributes);
+
+    } else if (name == "parameter") {
+
+        ok = readParameter(attributes);
+
     } else {
         std::cerr << "WARNING: SV-XML: Unexpected element \""
                   << name.toLocal8Bit().data() << "\"" << std::endl;
@@ -285,25 +301,37 @@
                           << m_currentDerivedModelId
                           << " as target, not regenerating" << std::endl;
             } else {
+                QString message;
                 m_currentDerivedModel = m_models[m_currentDerivedModelId] =
-                    m_document->addDerivedModel(m_currentTransformer,
-                                                m_currentTransformerSource,
-                                                m_currentTransformerContext,
-                                                m_currentTransformerConfiguration);
+                    m_document->addDerivedModel
+                    (m_currentTransform,
+                     ModelTransformer::Input(m_currentTransformSource,
+                                             m_currentTransformChannel),
+                     message);
+                if (!m_currentDerivedModel) {
+                    emit modelRegenerationFailed(tr("(derived model in SV-XML)"),
+                                                 m_currentTransform.getIdentifier(),
+                                                 message);
+                } else if (message != "") {
+                    emit modelRegenerationWarning(tr("(derived model in SV-XML)"),
+                                                  m_currentTransform.getIdentifier(),
+                                                  message);
+                }                    
             }
         } else {
-            m_document->addDerivedModel(m_currentTransformer,
-                                        m_currentTransformerSource,
-                                        m_currentTransformerContext,
-                                        m_currentDerivedModel,
-                                        m_currentTransformerConfiguration);
+            m_document->addDerivedModel
+                (m_currentTransform,
+                 ModelTransformer::Input(m_currentTransformSource,
+                                         m_currentTransformChannel),
+                 m_currentDerivedModel);
         }
 
         m_addedModels.insert(m_currentDerivedModel);
         m_currentDerivedModel = 0;
         m_currentDerivedModelId = -1;
-        m_currentTransformer = "";
-        m_currentTransformerConfiguration = "";
+        m_currentTransformSource = 0;
+        m_currentTransform = Transform();
+        m_currentTransformChannel = -1;
 
     } else if (name == "row") {
 	m_inRow = false;
@@ -413,7 +441,7 @@
         QString path = ff->find(FileFinder::AudioFile,
                                 originalPath, m_location);
 
-        FileSource file(path, true);
+        FileSource file(path, FileSource::ProgressDialog);
         file.waitForStatus();
 
         if (!file.isOK()) {
@@ -713,6 +741,9 @@
 	QString name = attributes.value("name");
 	layer->setObjectName(name);
 
+        QString presentationName = attributes.value("presentationName");
+        layer->setPresentationName(presentationName);
+
 	int modelId;
 	bool modelOk = false;
 	modelId = attributes.value("model").trimmed().toInt(&modelOk);
@@ -993,8 +1024,6 @@
 	return false;
     }
 
-    QString transform = attributes.value("transform");
-
     if (haveModel(modelId)) {
         m_currentDerivedModel = m_models[modelId];
     } else {
@@ -1009,31 +1038,43 @@
     sourceId = attributes.value("source").trimmed().toInt(&sourceOk);
 
     if (sourceOk && haveModel(sourceId)) {
-        m_currentTransformerSource = m_models[sourceId];
+        m_currentTransformSource = m_models[sourceId];
     } else {
-        m_currentTransformerSource = m_document->getMainModel();
+        m_currentTransformSource = m_document->getMainModel();
     }
 
-    m_currentTransformer = transform;
-    m_currentTransformerConfiguration = "";
-
-    m_currentTransformerContext = PluginTransformer::ExecutionContext();
+    m_currentTransform = Transform();
 
     bool ok = false;
     int channel = attributes.value("channel").trimmed().toInt(&ok);
-    if (ok) m_currentTransformerContext.channel = channel;
+    if (ok) m_currentTransformChannel = channel;
+    else m_currentTransformChannel = -1;
 
-    int domain = attributes.value("domain").trimmed().toInt(&ok);
-    if (ok) m_currentTransformerContext.domain = Vamp::Plugin::InputDomain(domain);
+    QString type = attributes.value("type");
+
+    if (type == "transform") {
+        m_currentTransformIsNewStyle = true;
+        return true;
+    } else {
+        m_currentTransformIsNewStyle = false;
+        std::cerr << "NOTE: SV-XML: Reading old-style derivation element"
+                  << std::endl;
+    }
+
+    QString transformId = attributes.value("transform");
+
+    m_currentTransform.setIdentifier(transformId);
 
     int stepSize = attributes.value("stepSize").trimmed().toInt(&ok);
-    if (ok) m_currentTransformerContext.stepSize = stepSize;
+    if (ok) m_currentTransform.setStepSize(stepSize);
 
     int blockSize = attributes.value("blockSize").trimmed().toInt(&ok);
-    if (ok) m_currentTransformerContext.blockSize = blockSize;
+    if (ok) m_currentTransform.setBlockSize(blockSize);
 
     int windowType = attributes.value("windowType").trimmed().toInt(&ok);
-    if (ok) m_currentTransformerContext.windowType = WindowType(windowType);
+    if (ok) m_currentTransform.setWindowType(WindowType(windowType));
+
+    if (!m_currentTransformSource) return true;
 
     QString startFrameStr = attributes.value("startFrame");
     QString durationStr = attributes.value("duration");
@@ -1050,8 +1091,13 @@
         if (!ok) duration = 0;
     }
 
-    m_currentTransformerContext.startFrame = startFrame;
-    m_currentTransformerContext.duration = duration;
+    m_currentTransform.setStartTime
+        (RealTime::frame2RealTime
+         (startFrame, m_currentTransformSource->getSampleRate()));
+
+    m_currentTransform.setDuration
+        (RealTime::frame2RealTime
+         (duration, m_currentTransformSource->getSampleRate()));
 
     return true;
 }
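The startFrame and duration attributes of an old-style derivation are sample frame counts, so the conversion above uses the source model's sample rate to turn them into RealTime values. A worked example of what RealTime::frame2RealTime computes, assuming it is the usual frames-to-seconds division:

    // seconds = double(frame) / double(sampleRate)
    // e.g. frame 44100 at a 44100 Hz source -> 1.0 s
    //      frame 22050 at a 44100 Hz source -> 0.5 s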
@@ -1119,6 +1165,10 @@
         return false;
     }
 
+    if (!m_currentPlayParameters && m_currentTransformIsNewStyle) {
+        return true;
+    }
+
     QString configurationXml = "<plugin";
     
     for (int i = 0; i < attributes.length(); ++i) {
@@ -1132,13 +1182,49 @@
     if (m_currentPlayParameters) {
         m_currentPlayParameters->setPlayPluginConfiguration(configurationXml);
     } else {
-        m_currentTransformerConfiguration += configurationXml;
+        TransformFactory::getInstance()->
+            setParametersFromPluginConfigurationXml(m_currentTransform,
+                                                    configurationXml);
     }
 
     return true;
 }
 
 bool
+SVFileReader::readTransform(const QXmlAttributes &attributes)
+{
+    if (m_currentDerivedModelId < 0) {
+        std::cerr << "WARNING: SV-XML: Transform found outside derivation" << std::endl;
+        return false;
+    }
+
+    m_currentTransform = Transform();
+    m_currentTransform.setFromXmlAttributes(attributes);
+    return true;
+}
+
+bool
+SVFileReader::readParameter(const QXmlAttributes &attributes)
+{
+    if (m_currentDerivedModelId < 0) {
+        std::cerr << "WARNING: SV-XML: Parameter found outside derivation" << std::endl;
+        return false;
+    }
+
+    QString name = attributes.value("name");
+    if (name == "") {
+        std::cerr << "WARNING: SV-XML: Ignoring nameless transform parameter"
+                  << std::endl;
+        return false;
+    }
+
+    bool ok = false;
+    float value = attributes.value("value").trimmed().toFloat(&ok);
+    if (!ok) {
+        std::cerr << "WARNING: SV-XML: Ignoring transform parameter with non-numeric value"
+                  << std::endl;
+        return false;
+    }
+
+    m_currentTransform.setParameter(name, value);
+    return true;
+}
+
+bool
 SVFileReader::readSelection(const QXmlAttributes &attributes)
 {
     bool ok;
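readTransform() and readParameter() above between them rebuild the Transform that a new-style derivation describes: the <transform> element supplies its attributes through setFromXmlAttributes(), and each <parameter> element becomes a setParameter() call on the same object. A minimal sketch of the object they end up populating, using only Transform calls visible in this change (the parameter name and value are illustrative):

    // Roughly what the two handlers build for a derivation containing
    //   <transform id="vamp:soname:pluginid:output" .../>
    //   <parameter name="sensitivity" value="0.5"/>
    Transform t;
    t.setIdentifier("vamp:soname:pluginid:output");
    t.setParameter("sensitivity", 0.5f);

The populated Transform is handed to Document::addDerivedModel() when the enclosing derivation element closes, as in the endElement handling earlier in this file's diff.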
--- a/framework/SVFileReader.h	Fri Nov 30 17:36:14 2007 +0000
+++ b/framework/SVFileReader.h	Wed Feb 27 10:32:45 2008 +0000
@@ -18,7 +18,6 @@
 
 #include "layer/LayerFactory.h"
 #include "plugin/transform/Transform.h"
-#include "plugin/transform/PluginTransformer.h"
 
 #include <QXmlDefaultHandler>
 
@@ -89,8 +88,19 @@
            a derivation element, and no model element should appear
            for it at all. -->
 
-      <derivation source="0" model="2" transform="..." ...>
-        <plugin id="..." ... />
+      <derivation type="transform" source="0" model="2" channel="-1">
+        <transform id="vamp:soname:pluginid:output" ... />
+      </derivation>
+
+      <!-- Note that the derivation element just described replaces
+           this earlier formulation, which had more attributes in the
+           derivation element and a plugin element describing plugin
+           parameters and properties.  What we actually read and
+           write these days is a horrid composite of the two formats,
+           for backward compatibility reasons. -->
+
+      <derivation source="0" model="2" transform="vamp:soname:pluginid:output" ...>
+        <plugin id="pluginid" ... />
       </derivation>
 
       <!-- The playparameters element lists playback settings for
@@ -151,8 +161,10 @@
  */
 
 
-class SVFileReader : public QXmlDefaultHandler
+class SVFileReader : public QObject, QXmlDefaultHandler
 {
+    Q_OBJECT
+
 public:
     SVFileReader(Document *document,
 		 SVFileReaderPaneCallback &callback,
@@ -182,6 +194,12 @@
     bool error(const QXmlParseException &exception);
     bool fatalError(const QXmlParseException &exception);
 
+signals:
+    void modelRegenerationFailed(QString layerName, QString transformName,
+                                 QString message);
+    void modelRegenerationWarning(QString layerName, QString transformName,
+                                  QString message);
+
 protected:
     bool readWindow(const QXmlAttributes &);
     bool readModel(const QXmlAttributes &);
@@ -195,6 +213,8 @@
     bool readDerivation(const QXmlAttributes &);
     bool readPlayParameters(const QXmlAttributes &);
     bool readPlugin(const QXmlAttributes &);
+    bool readTransform(const QXmlAttributes &);
+    bool readParameter(const QXmlAttributes &);
     bool readSelection(const QXmlAttributes &);
     bool readMeasurement(const QXmlAttributes &);
     void addUnaddedModels();
@@ -216,10 +236,10 @@
     Model *m_currentDerivedModel;
     int m_currentDerivedModelId;
     PlayParameters *m_currentPlayParameters;
-    QString m_currentTransformer;
-    Model *m_currentTransformerSource;
-    PluginTransformer::ExecutionContext m_currentTransformerContext;
-    QString m_currentTransformerConfiguration;
+    Transform m_currentTransform;
+    Model *m_currentTransformSource;
+    int m_currentTransformChannel;
+    bool m_currentTransformIsNewStyle;
     QString m_datasetSeparator;
     bool m_inRow;
     bool m_inLayer;
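Since SVFileReader now derives from QObject and declares the two regeneration signals, the host application can surface regeneration problems found during loading. A sketch of the wiring, assuming Qt4-style connections and a receiver that declares matching three-argument slots (as the MainWindowBase virtuals earlier in this changeset suggest):

    // Hypothetical caller-side wiring: "reader" is an SVFileReader instance,
    // "this" a window class providing the matching slots.
    connect(reader, SIGNAL(modelRegenerationFailed(QString, QString, QString)),
            this, SLOT(modelRegenerationFailed(QString, QString, QString)));
    connect(reader, SIGNAL(modelRegenerationWarning(QString, QString, QString)),
            this, SLOT(modelRegenerationWarning(QString, QString, QString)));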