changeset 328:28c17ce7a6e9 tony_integration

Merge from tonioni branch
author Chris Cannam
date Tue, 28 Jan 2014 15:02:15 +0000
parents c837368b1faf (current diff) d2c13ec0f148 (diff)
children 1e3af7f4fa86
files audioio/AudioCoreAudioTarget.cpp audioio/AudioCoreAudioTarget.h audioio/audioio.pro framework/MainWindowBase.cpp framework/framework.pro
diffstat 18 files changed, 1019 insertions(+), 559 deletions(-)
--- a/audioio/AudioCallbackPlaySource.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/audioio/AudioCallbackPlaySource.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -167,7 +167,7 @@
     }
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-    cout << "Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl;
+    cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl;
 #endif
 
     if (m_sourceSampleRate == 0) {
--- a/audioio/AudioCoreAudioTarget.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifdef HAVE_COREAUDIO
-
-#include "AudioCoreAudioTarget.h"
-
-
-
-#endif
--- a/audioio/AudioCoreAudioTarget.h	Thu Dec 12 15:20:14 2013 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
-
-/*
-    Sonic Visualiser
-    An audio file viewer and annotation editor.
-    Centre for Digital Music, Queen Mary, University of London.
-    This file copyright 2006 Chris Cannam.
-    
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
-*/
-
-#ifndef _AUDIO_CORE_AUDIO_TARGET_H_
-#define _AUDIO_CORE_AUDIO_TARGET_H_
-
-#ifdef HAVE_COREAUDIO
-
-#include <jack/jack.h>
-#include <vector>
-
-#include <CoreAudio/CoreAudio.h>
-#include <CoreAudio/CoreAudioTypes.h>
-#include <AudioUnit/AUComponent.h>
-#include <AudioUnit/AudioUnitProperties.h>
-#include <AudioUnit/AudioUnitParameters.h>
-#include <AudioUnit/AudioOutputUnit.h>
-
-#include "AudioCallbackPlayTarget.h"
-
-class AudioCallbackPlaySource;
-
-class AudioCoreAudioTarget : public AudioCallbackPlayTarget
-{
-    Q_OBJECT
-
-public:
-    AudioCoreAudioTarget(AudioCallbackPlaySource *source);
-    ~AudioCoreAudioTarget();
-
-    virtual bool isOK() const;
-
-public slots:
-    virtual void sourceModelReplaced();
-
-protected:
-    OSStatus process(void *data,
-		     AudioUnitRenderActionFlags *flags,
-		     const AudioTimeStamp *timestamp,
-		     unsigned int inbus,
-		     unsigned int inframes,
-		     AudioBufferList *ioData);
-
-    int m_bufferSize;
-    int m_sampleRate;
-    int m_latency;
-};
-
-#endif /* HAVE_COREAUDIO */
-
-#endif
-
--- a/audioio/AudioGenerator.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/audioio/AudioGenerator.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -22,14 +22,14 @@
 #include "base/Exceptions.h"
 
 #include "data/model/NoteModel.h"
+#include "data/model/FlexiNoteModel.h"
 #include "data/model/DenseTimeValueModel.h"
+#include "data/model/SparseTimeValueModel.h"
 #include "data/model/SparseOneDimensionalModel.h"
+#include "data/model/NoteData.h"
 
-#include "plugin/RealTimePluginFactory.h"
-#include "plugin/RealTimePluginInstance.h"
-#include "plugin/PluginIdentifier.h"
-#include "plugin/PluginXml.h"
-#include "plugin/api/alsa/seq_event.h"
+#include "ClipMixer.h"
+#include "ContinuousSynth.h"
 
 #include <iostream>
 #include <cmath>
@@ -38,7 +38,7 @@
 #include <QFile>
 
 const size_t
-AudioGenerator::m_pluginBlockSize = 2048;
+AudioGenerator::m_processingBlockSize = 1024;
 
 QString
 AudioGenerator::m_sampleDir = "";
@@ -48,19 +48,15 @@
 AudioGenerator::AudioGenerator() :
     m_sourceSampleRate(0),
     m_targetChannelCount(1),
+	m_waveType(0),
     m_soloing(false)
 {
     initialiseSampleDir();
 
     connect(PlayParameterRepository::getInstance(),
-            SIGNAL(playPluginIdChanged(const Playable *, QString)),
+            SIGNAL(playClipIdChanged(const Playable *, QString)),
             this,
-            SLOT(playPluginIdChanged(const Playable *, QString)));
-
-    connect(PlayParameterRepository::getInstance(),
-            SIGNAL(playPluginConfigurationChanged(const Playable *, QString)),
-            this,
-            SLOT(playPluginConfigurationChanged(const Playable *, QString)));
+            SLOT(playClipIdChanged(const Playable *, QString)));
 }
 
 AudioGenerator::~AudioGenerator()
@@ -125,74 +121,69 @@
 	}
     }
 
-    RealTimePluginInstance *plugin = loadPluginFor(model);
-    if (plugin) {
-        QMutexLocker locker(&m_mutex);
-        m_synthMap[model] = plugin;
-        return true;
+    if (usesClipMixer(model)) {
+        ClipMixer *mixer = makeClipMixerFor(model);
+        if (mixer) {
+            QMutexLocker locker(&m_mutex);
+            m_clipMixerMap[model] = mixer;
+            return true;
+        }
+    }
+
+    if (usesContinuousSynth(model)) {
+        ContinuousSynth *synth = makeSynthFor(model);
+        if (synth) {
+            QMutexLocker locker(&m_mutex);
+            m_continuousSynthMap[model] = synth;
+            return true;
+        }
     }
 
     return false;
 }
 
 void
-AudioGenerator::playPluginIdChanged(const Playable *playable, QString)
+AudioGenerator::playClipIdChanged(const Playable *playable, QString)
 {
     const Model *model = dynamic_cast<const Model *>(playable);
     if (!model) {
-        cerr << "WARNING: AudioGenerator::playPluginIdChanged: playable "
+        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
                   << playable << " is not a supported model type"
                   << endl;
         return;
     }
 
-    if (m_synthMap.find(model) == m_synthMap.end()) return;
-    
-    RealTimePluginInstance *plugin = loadPluginFor(model);
-    if (plugin) {
+    if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
+
+    ClipMixer *mixer = makeClipMixerFor(model);
+    if (mixer) {
         QMutexLocker locker(&m_mutex);
-        delete m_synthMap[model];
-        m_synthMap[model] = plugin;
+        m_clipMixerMap[model] = mixer;
     }
 }
 
-void
-AudioGenerator::playPluginConfigurationChanged(const Playable *playable,
-                                               QString configurationXml)
+bool
+AudioGenerator::usesClipMixer(const Model *model)
 {
-//    SVDEBUG << "AudioGenerator::playPluginConfigurationChanged" << endl;
-
-    const Model *model = dynamic_cast<const Model *>(playable);
-    if (!model) {
-        cerr << "WARNING: AudioGenerator::playPluginIdChanged: playable "
-                  << playable << " is not a supported model type"
-                  << endl;
-        return;
-    }
-
-    if (m_synthMap.find(model) == m_synthMap.end()) {
-        SVDEBUG << "AudioGenerator::playPluginConfigurationChanged: We don't know about this plugin" << endl;
-        return;
-    }
-
-    RealTimePluginInstance *plugin = m_synthMap[model];
-    if (plugin) {
-        PluginXml(plugin).setParametersFromXml(configurationXml);
-    }
+    bool clip = 
+        (qobject_cast<const SparseOneDimensionalModel *>(model) ||
+         qobject_cast<const NoteModel *>(model) ||
+         qobject_cast<const FlexiNoteModel *>(model));
+    return clip;
 }
 
-void
-AudioGenerator::setSampleDir(RealTimePluginInstance *plugin)
+bool
+AudioGenerator::usesContinuousSynth(const Model *model)
 {
-    if (m_sampleDir != "") {
-        plugin->configure("sampledir", m_sampleDir.toStdString());
-    }
-} 
+    bool cont = 
+        (qobject_cast<const SparseTimeValueModel *>(model));
+    return cont;
+}
 
-RealTimePluginInstance *
-AudioGenerator::loadPluginFor(const Model *model)
+ClipMixer *
+AudioGenerator::makeClipMixerFor(const Model *model)
 {
-    QString pluginId, configurationXml;
+    QString clipId;
 
     const Playable *playable = model;
     if (!playable || !playable->canPlay()) return 0;
@@ -200,67 +191,48 @@
     PlayParameters *parameters =
 	PlayParameterRepository::getInstance()->getPlayParameters(playable);
     if (parameters) {
-        pluginId = parameters->getPlayPluginId();
-        configurationXml = parameters->getPlayPluginConfiguration();
+        clipId = parameters->getPlayClipId();
     }
 
-    if (pluginId == "") return 0;
+    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
 
-    RealTimePluginInstance *plugin = loadPlugin(pluginId, "");
-    if (!plugin) return 0;
-
-    if (configurationXml != "") {
-        PluginXml(plugin).setParametersFromXml(configurationXml);
-        setSampleDir(plugin);
-    }
-
-    configurationXml = PluginXml(plugin).toXmlString();
-
-    if (parameters) {
-        parameters->setPlayPluginId(pluginId);
-        parameters->setPlayPluginConfiguration(configurationXml);
-    }
-
-    return plugin;
-}
-
-RealTimePluginInstance *
-AudioGenerator::loadPlugin(QString pluginId, QString program)
-{
-    RealTimePluginFactory *factory =
-	RealTimePluginFactory::instanceFor(pluginId);
-    
-    if (!factory) {
-	cerr << "Failed to get plugin factory" << endl;
-	return 0;
-    }
-	
-    RealTimePluginInstance *instance =
-	factory->instantiatePlugin
-	(pluginId, 0, 0, m_sourceSampleRate, m_pluginBlockSize, m_targetChannelCount);
-
-    if (!instance) {
-	cerr << "Failed to instantiate plugin " << pluginId << endl;
+    if (clipId == "") {
+        SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
         return 0;
     }
 
-    setSampleDir(instance);
+    ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
+                                     m_sourceSampleRate,
+                                     m_processingBlockSize);
 
-    for (unsigned int i = 0; i < instance->getParameterCount(); ++i) {
-        instance->setParameterValue(i, instance->getParameterDefault(i));
+    float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required
+
+    QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
+
+    if (!mixer->loadClipData(clipPath, clipF0)) {
+        delete mixer;
+        return 0;
     }
-    std::string defaultProgram = instance->getProgram(0, 0);
-    if (defaultProgram != "") {
-//        cerr << "first selecting default program " << defaultProgram << endl;
-        instance->selectProgram(defaultProgram);
-    }
-    if (program != "") {
-//        cerr << "now selecting desired program " << program << endl;
-        instance->selectProgram(program.toStdString());
-    }
-    instance->setIdealChannelCount(m_targetChannelCount); // reset!
 
-    return instance;
+    std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
+
+    return mixer;
+}
+
+ContinuousSynth *
+AudioGenerator::makeSynthFor(const Model *model)
+{
+    const Playable *playable = model;
+    if (!playable || !playable->canPlay()) return 0;
+
+    ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
+                                                 m_sourceSampleRate,
+                                                 m_processingBlockSize,
+                                                 m_waveType);
+
+    std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
+
+    return synth;
 }
 
 void
@@ -272,21 +244,22 @@
 
     QMutexLocker locker(&m_mutex);
 
-    if (m_synthMap.find(sodm) == m_synthMap.end()) return;
+    if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
 
-    RealTimePluginInstance *instance = m_synthMap[sodm];
-    m_synthMap.erase(sodm);
-    delete instance;
+    ClipMixer *mixer = m_clipMixerMap[sodm];
+    m_clipMixerMap.erase(sodm);
+    delete mixer;
 }
 
 void
 AudioGenerator::clearModels()
 {
     QMutexLocker locker(&m_mutex);
-    while (!m_synthMap.empty()) {
-	RealTimePluginInstance *instance = m_synthMap.begin()->second;
-	m_synthMap.erase(m_synthMap.begin());
-	delete instance;
+
+    while (!m_clipMixerMap.empty()) {
+        ClipMixer *mixer = m_clipMixerMap.begin()->second;
+	m_clipMixerMap.erase(m_clipMixerMap.begin());
+	delete mixer;
     }
 }    
 
@@ -294,10 +267,10 @@
 AudioGenerator::reset()
 {
     QMutexLocker locker(&m_mutex);
-    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
+
+    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
 	if (i->second) {
-	    i->second->silence();
-	    i->second->discardEvents();
+	    i->second->reset();
 	}
     }
 
@@ -314,15 +287,15 @@
     QMutexLocker locker(&m_mutex);
     m_targetChannelCount = targetChannelCount;
 
-    for (PluginMap::iterator i = m_synthMap.begin(); i != m_synthMap.end(); ++i) {
-	if (i->second) i->second->setIdealChannelCount(targetChannelCount);
+    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
+	if (i->second) i->second->setChannelCount(targetChannelCount);
     }
 }
 
 size_t
 AudioGenerator::getBlockSize() const
 {
-    return m_pluginBlockSize;
+    return m_processingBlockSize;
 }
 
 void
@@ -387,15 +360,18 @@
 				      buffer, gain, pan, fadeIn, fadeOut);
     }
 
-    bool synthetic = 
-        (qobject_cast<SparseOneDimensionalModel *>(model) ||
-         qobject_cast<NoteModel *>(model));
+    if (usesClipMixer(model)) {
+        return mixClipModel(model, startFrame, frameCount,
+                            buffer, gain, pan);
+    }
 
-    if (synthetic) {
-        return mixSyntheticNoteModel(model, startFrame, frameCount,
-                                     buffer, gain, pan, fadeIn, fadeOut);
+    if (usesContinuousSynth(model)) {
+        return mixContinuousSynthModel(model, startFrame, frameCount,
+                                       buffer, gain, pan);
     }
 
+    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;
+
     return frameCount;
 }
 
@@ -495,18 +471,17 @@
 }
   
 size_t
-AudioGenerator::mixSyntheticNoteModel(Model *model,
-                                      size_t startFrame, size_t frames,
-                                      float **buffer, float gain, float pan,
-                                      size_t /* fadeIn */,
-                                      size_t /* fadeOut */)
+AudioGenerator::mixClipModel(Model *model,
+                             size_t startFrame, size_t frames,
+                             float **buffer, float gain, float pan)
 {
-    RealTimePluginInstance *plugin = m_synthMap[model];
-    if (!plugin) return 0;
+    ClipMixer *clipMixer = m_clipMixerMap[model];
+    if (!clipMixer) return 0;
 
-    size_t latency = plugin->getLatency();
-    size_t blocks = frames / m_pluginBlockSize;
+    size_t blocks = frames / m_processingBlockSize;
     
+    //!!! todo: the below -- it matters
+
     //!!! hang on -- the fact that the audio callback play source's
     //buffer is a multiple of the plugin's buffer size doesn't mean
     //that we always get called for a multiple of it here (because it
@@ -517,197 +492,178 @@
     //callback play source has to use that as a multiple for all the
     //calls to mixModel
 
-    size_t got = blocks * m_pluginBlockSize;
+    size_t got = blocks * m_processingBlockSize;
 
 #ifdef DEBUG_AUDIO_GENERATOR
-    cout << "mixModel [synthetic note]: frames " << frames
+    cout << "mixModel [clip]: frames " << frames
 	      << ", blocks " << blocks << endl;
 #endif
 
-    snd_seq_event_t onEv;
-    onEv.type = SND_SEQ_EVENT_NOTEON;
-    onEv.data.note.channel = 0;
+    ClipMixer::NoteStart on;
+    ClipMixer::NoteEnd off;
 
-    snd_seq_event_t offEv;
-    offEv.type = SND_SEQ_EVENT_NOTEOFF;
-    offEv.data.note.channel = 0;
-    offEv.data.note.velocity = 0;
-    
     NoteOffSet &noteOffs = m_noteOffs[model];
 
+    float **bufferIndexes = new float *[m_targetChannelCount];
+
     for (size_t i = 0; i < blocks; ++i) {
 
-	size_t reqStart = startFrame + i * m_pluginBlockSize;
+	size_t reqStart = startFrame + i * m_processingBlockSize;
 
-        NoteList notes = getNotes(model,
-                                  reqStart + latency,
-                                  reqStart + latency + m_pluginBlockSize);
+        NoteList notes;
+        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
+        if (exportable) {
+            notes = exportable->getNotes(reqStart,
+                                         reqStart + m_processingBlockSize);
+        }
 
-        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
-	    (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);
+        std::vector<ClipMixer::NoteStart> starts;
+        std::vector<ClipMixer::NoteEnd> ends;
 
 	for (NoteList::const_iterator ni = notes.begin();
              ni != notes.end(); ++ni) {
 
 	    size_t noteFrame = ni->start;
 
-	    if (noteFrame >= latency) noteFrame -= latency;
-
 	    if (noteFrame < reqStart ||
-		noteFrame >= reqStart + m_pluginBlockSize) continue;
+		noteFrame >= reqStart + m_processingBlockSize) continue;
 
 	    while (noteOffs.begin() != noteOffs.end() &&
 		   noteOffs.begin()->frame <= noteFrame) {
 
-                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		    (noteOffs.begin()->frame, m_sourceSampleRate);
+                size_t eventFrame = noteOffs.begin()->frame;
+                if (eventFrame < reqStart) eventFrame = reqStart;
 
-		offEv.data.note.note = noteOffs.begin()->pitch;
+                off.frameOffset = eventFrame - reqStart;
+                off.frequency = noteOffs.begin()->frequency;
 
 #ifdef DEBUG_AUDIO_GENERATOR
-		cerr << "mixModel [synthetic]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << endl;
+		cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
 #endif
 
-		plugin->sendEvent(eventTime, &offEv);
+                ends.push_back(off);
 		noteOffs.erase(noteOffs.begin());
 	    }
 
-            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		(noteFrame, m_sourceSampleRate);
-	    
-            if (ni->isMidiPitchQuantized) {
-                onEv.data.note.note = ni->midiPitch;
-            } else {
-#ifdef DEBUG_AUDIO_GENERATOR
-                cerr << "mixModel [synthetic]: non-pitch-quantized notes are not supported [yet], quantizing" << endl;
-#endif
-                onEv.data.note.note = Pitch::getPitchForFrequency(ni->frequency);
-            }
-
-            onEv.data.note.velocity = ni->velocity;
-
-	    plugin->sendEvent(eventTime, &onEv);
+            on.frameOffset = noteFrame - reqStart;
+            on.frequency = ni->getFrequency();
+            on.level = float(ni->velocity) / 127.0;
+            on.pan = pan;
 
 #ifdef DEBUG_AUDIO_GENERATOR
-	    cout << "mixModel [synthetic]: note at frame " << noteFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << endl;
+	    cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << endl;
 #endif
 	    
+            starts.push_back(on);
 	    noteOffs.insert
-                (NoteOff(onEv.data.note.note, noteFrame + ni->duration));
+                (NoteOff(on.frequency, noteFrame + ni->duration));
 	}
 
 	while (noteOffs.begin() != noteOffs.end() &&
-	       noteOffs.begin()->frame <=
-	       startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {
+	       noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
 
-            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		(noteOffs.begin()->frame, m_sourceSampleRate);
+            size_t eventFrame = noteOffs.begin()->frame;
+            if (eventFrame < reqStart) eventFrame = reqStart;
 
-	    offEv.data.note.note = noteOffs.begin()->pitch;
+            off.frameOffset = eventFrame - reqStart;
+            off.frequency = noteOffs.begin()->frequency;
 
 #ifdef DEBUG_AUDIO_GENERATOR
-            cerr << "mixModel [synthetic]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << endl;
+            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
 #endif
 
-	    plugin->sendEvent(eventTime, &offEv);
-	    noteOffs.erase(noteOffs.begin());
+            ends.push_back(off);
+            noteOffs.erase(noteOffs.begin());
 	}
-	
-	plugin->run(blockTime);
-	float **outs = plugin->getAudioOutputBuffers();
 
 	for (size_t c = 0; c < m_targetChannelCount; ++c) {
-#ifdef DEBUG_AUDIO_GENERATOR
-	    cout << "mixModel [synthetic]: adding " << m_pluginBlockSize << " samples from plugin output " << c << endl;
-#endif
+            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
+        }
 
-	    size_t sourceChannel = (c % plugin->getAudioOutputCount());
+        clipMixer->mix(bufferIndexes, gain, starts, ends);
+    }
 
-	    float channelGain = gain;
-	    if (pan != 0.0) {
-		if (c == 0) {
-		    if (pan > 0.0) channelGain *= 1.0 - pan;
-		} else {
-		    if (pan < 0.0) channelGain *= pan + 1.0;
-		}
-	    }
-
-	    for (size_t j = 0; j < m_pluginBlockSize; ++j) {
-		buffer[c][i * m_pluginBlockSize + j] +=
-		    channelGain * outs[sourceChannel][j];
-	    }
-	}
-    }
+    delete[] bufferIndexes;
 
     return got;
 }
 
-AudioGenerator::NoteList
-AudioGenerator::getNotes(Model *model,
-                         size_t startFrame,
-                         size_t endFrame)
+size_t
+AudioGenerator::mixContinuousSynthModel(Model *model,
+                                        size_t startFrame,
+                                        size_t frames,
+                                        float **buffer,
+                                        float gain, 
+                                        float pan)
 {
-    NoteList notes;
+    ContinuousSynth *synth = m_continuousSynthMap[model];
+    if (!synth) return 0;
 
-    SparseOneDimensionalModel *sodm = 
-        qobject_cast<SparseOneDimensionalModel *>(model);
+    // only type we support here at the moment
+    SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
+    if (stvm->getScaleUnits() != "Hz") return 0;
 
-    if (sodm) {
-        
-	SparseOneDimensionalModel::PointList points =
-	    sodm->getPoints(startFrame, endFrame);
+    size_t blocks = frames / m_processingBlockSize;
 
-	for (SparseOneDimensionalModel::PointList::iterator pli =
-		 points.begin(); pli != points.end(); ++pli) {
+    //!!! todo: see comment in mixClipModel
 
-            notes.push_back
-                (NoteData(pli->frame,
-                          m_sourceSampleRate / 6, // arbitrary short duration
-                          64,   // default pitch
-                          100)); // default velocity
+    size_t got = blocks * m_processingBlockSize;
+
+#ifdef DEBUG_AUDIO_GENERATOR
+    cout << "mixModel [synth]: frames " << frames
+	      << ", blocks " << blocks << endl;
+#endif
+    
+    float **bufferIndexes = new float *[m_targetChannelCount];
+
+    for (size_t i = 0; i < blocks; ++i) {
+
+	size_t reqStart = startFrame + i * m_processingBlockSize;
+
+	for (size_t c = 0; c < m_targetChannelCount; ++c) {
+            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
         }
 
-        return notes;
+        SparseTimeValueModel::PointList points = 
+            stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
+
+        // by default, repeat last frequency
+        float f0 = 0.f;
+
+        // go straight to the last freq that is genuinely in this range
+        for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
+             itr != points.begin(); ) {
+            --itr;
+            if (itr->frame >= reqStart &&
+                itr->frame < reqStart + m_processingBlockSize) {
+                f0 = itr->value;
+                break;
+            }
+        }
+
+        // if we found no such frequency and the next point is further
+        // away than twice the model resolution, go silent (same
+        // criterion TimeValueLayer uses for ending a discrete curve
+        // segment)
+        if (f0 == 0.f) {
+            SparseTimeValueModel::PointList nextPoints = 
+                stvm->getNextPoints(reqStart + m_processingBlockSize);
+            if (nextPoints.empty() ||
+                nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
+                f0 = -1.f;
+            }
+        }
+
+//        cerr << "f0 = " << f0 << endl;
+
+        synth->mix(bufferIndexes,
+                   gain,
+                   pan,
+                   f0);
     }
 
-    NoteModel *nm = qobject_cast<NoteModel *>(model);
+    delete[] bufferIndexes;
 
-    if (nm) {
-        
-	NoteModel::PointList points =
-	    nm->getPoints(startFrame, endFrame);
-
-        for (NoteModel::PointList::iterator pli =
-		 points.begin(); pli != points.end(); ++pli) {
-
-	    size_t duration = pli->duration;
-            if (duration == 0 || duration == 1) {
-                duration = m_sourceSampleRate / 20;
-            }
-
-            int pitch = lrintf(pli->value);
-
-            int velocity = 100;
-            if (pli->level > 0.f && pli->level <= 1.f) {
-                velocity = lrintf(pli->level * 127);
-            }
-
-            NoteData note(pli->frame,
-                          duration,
-                          pitch,
-                          velocity);
-
-            if (nm->getScaleUnits() == "Hz") {
-                note.frequency = pli->value;
-                note.isMidiPitchQuantized = false;
-            }
-        
-            notes.push_back(note);
-        }
-
-        return notes;
-    }
-
-    return notes;
+    return got;
 }
 
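For reference, the clip pitch passed to ClipMixer::loadClipData above comes from Pitch::getFrequencyForPitch(60, 0, 440.0f), i.e. MIDI note 60 at concert A = 440 Hz. A minimal sketch of that equal-tempered conversion, assuming standard 12-TET; the helper name is hypothetical, not part of the codebase:

    #include <cmath>

    // Equal-tempered MIDI-to-frequency conversion (A4 = MIDI 69 = 440 Hz).
    // For MIDI note 60 this gives 440 * 2^((60 - 69) / 12), about 261.63 Hz,
    // which is the reference pitch the new code hands to loadClipData.
    static float midiToFrequency(int midiPitch, float concertA = 440.0f)
    {
        return concertA * std::pow(2.0f, (midiPitch - 69) / 12.0f);
    }
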
--- a/audioio/AudioGenerator.h	Thu Dec 12 15:20:14 2013 +0000
+++ b/audioio/AudioGenerator.h	Tue Jan 28 15:02:15 2014 +0000
@@ -18,10 +18,12 @@
 
 class Model;
 class NoteModel;
+class FlexiNoteModel;
 class DenseTimeValueModel;
 class SparseOneDimensionalModel;
-class RealTimePluginInstance;
 class Playable;
+class ClipMixer;
+class ContinuousSynth;
 
 #include <QObject>
 #include <QMutex>
@@ -57,7 +59,7 @@
     virtual void clearModels();
 
     /**
-     * Reset playback, clearing plugins and the like.
+     * Reset playback, clearing buffers and the like.
      */
     virtual void reset();
 
@@ -92,37 +94,21 @@
     virtual void clearSoloModelSet();
 
 protected slots:
-    void playPluginIdChanged(const Playable *, QString);
-    void playPluginConfigurationChanged(const Playable *, QString);
+    void playClipIdChanged(const Playable *, QString);
 
 protected:
-    size_t       m_sourceSampleRate;
-    size_t       m_targetChannelCount;
+    size_t m_sourceSampleRate;
+    size_t m_targetChannelCount;
+    size_t m_waveType;
 
     bool m_soloing;
     std::set<Model *> m_soloModelSet;
 
-    struct NoteData {
-
-        NoteData(size_t _start, size_t _dur, int _mp, int _vel) :
-            start(_start), duration(_dur), midiPitch(_mp), frequency(0),
-            isMidiPitchQuantized(true), velocity(_vel) { };
-            
-        size_t start;     // audio sample frame
-        size_t duration;  // in audio sample frames
-        int midiPitch; // 0-127
-        int frequency; // Hz, to be used if isMidiPitchQuantized false
-        bool isMidiPitchQuantized;
-        int velocity;  // MIDI-style 0-127
-    };
-
-    typedef std::vector<NoteData> NoteList;
-    
     struct NoteOff {
 
-        NoteOff(int _p, size_t _f) : pitch(_p), frame(_f) { }
+        NoteOff(float _freq, size_t _frame) : frequency(_freq), frame(_frame) { }
 
-	int pitch;
+        float frequency;
 	size_t frame;
 
 	struct Comparator {
@@ -132,32 +118,43 @@
 	};
     };
 
-    typedef std::map<const Model *, RealTimePluginInstance *> PluginMap;
+
+    typedef std::map<const Model *, ClipMixer *> ClipMixerMap;
 
     typedef std::multiset<NoteOff, NoteOff::Comparator> NoteOffSet;
     typedef std::map<const Model *, NoteOffSet> NoteOffMap;
 
+    typedef std::map<const Model *, ContinuousSynth *> ContinuousSynthMap;
+
     QMutex m_mutex;
-    PluginMap m_synthMap;
+
+    ClipMixerMap m_clipMixerMap;
     NoteOffMap m_noteOffs;
     static QString m_sampleDir;
 
-    virtual RealTimePluginInstance *loadPluginFor(const Model *model);
-    virtual RealTimePluginInstance *loadPlugin(QString id, QString program);
+    ContinuousSynthMap m_continuousSynthMap;
+
+    bool usesClipMixer(const Model *);
+    bool usesContinuousSynth(const Model *);
+
+    ClipMixer *makeClipMixerFor(const Model *model);
+    ContinuousSynth *makeSynthFor(const Model *model);
+
     static void initialiseSampleDir();
-    static void setSampleDir(RealTimePluginInstance *plugin);
 
     virtual size_t mixDenseTimeValueModel
     (DenseTimeValueModel *model, size_t startFrame, size_t frameCount,
      float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
 
-    virtual size_t mixSyntheticNoteModel
+    virtual size_t mixClipModel
     (Model *model, size_t startFrame, size_t frameCount,
-     float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
+     float **buffer, float gain, float pan);
+
+    virtual size_t mixContinuousSynthModel
+    (Model *model, size_t startFrame, size_t frameCount,
+     float **buffer, float gain, float pan);
     
-    NoteList getNotes(Model *model, size_t startFrame, size_t endFrame);
-
-    static const size_t m_pluginBlockSize;
+    static const size_t m_processingBlockSize;
 };
 
 #endif
--- a/audioio/AudioTargetFactory.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/audioio/AudioTargetFactory.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -16,7 +16,6 @@
 #include "AudioTargetFactory.h"
 
 #include "AudioJACKTarget.h"
-#include "AudioCoreAudioTarget.h"
 #include "AudioPortAudioTarget.h"
 #include "AudioPulseAudioTarget.h"
 
@@ -54,10 +53,6 @@
     names.push_back("pulse");
 #endif
 
-#ifdef HAVE_COREAUDIO
-    names.push_back("core");
-#endif
-
 #ifdef HAVE_PORTAUDIO_2_0
     names.push_back("port");
 #endif
@@ -80,10 +75,6 @@
         return QCoreApplication::translate("AudioTargetFactory",
                                            "PulseAudio Server");
     }
-    if (name == "core") {
-        return QCoreApplication::translate("AudioTargetFactory",
-                                           "Core Audio Device");
-    }
     if (name == "port") {
         return QCoreApplication::translate("AudioTargetFactory",
                                            "Default Soundcard Device");
@@ -126,10 +117,6 @@
         if (m_default == "pulse") target = new AudioPulseAudioTarget(source);
 #endif
 
-#ifdef HAVE_COREAUDIO
-        if (m_default == "core") target = new AudioCoreAudioTarget(source);
-#endif
-
 #ifdef HAVE_PORTAUDIO_2_0
         if (m_default == "port") target = new AudioPortAudioTarget(source);
 #endif
@@ -160,15 +147,6 @@
 	delete target;
     }
 #endif
-
-#ifdef HAVE_COREAUDIO
-    target = new AudioCoreAudioTarget(source);
-    if (target->isOK()) return target;
-    else {
-	cerr << "WARNING: AudioTargetFactory::createCallbackTarget: Failed to open CoreAudio target" << endl;
-	delete target;
-    }
-#endif
     
 #ifdef HAVE_PORTAUDIO_2_0
     target = new AudioPortAudioTarget(source);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/ClipMixer.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -0,0 +1,238 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "ClipMixer.h"
+
+#include <sndfile.h>
+#include <cmath>
+
+#include "base/Debug.h"
+
+ClipMixer::ClipMixer(int channels, int sampleRate, int blockSize) :
+    m_channels(channels),
+    m_sampleRate(sampleRate),
+    m_blockSize(blockSize),
+    m_clipData(0)
+{
+}
+
+ClipMixer::~ClipMixer()
+{
+    if (m_clipData) free(m_clipData);
+}
+
+void
+ClipMixer::setChannelCount(int channels)
+{
+    m_channels = channels;
+}
+
+bool
+ClipMixer::loadClipData(QString path, float f0)
+{
+    if (m_clipData) {
+        cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl;
+        return false;
+    }
+
+    SF_INFO info;
+    SNDFILE *file;
+    int sampleCount = 0;
+    float *tmpFrames;
+    size_t i;
+
+    info.format = 0;
+    file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info);
+    if (!file) {
+	cerr << "ClipMixer::loadClipData: Failed to open file path \""
+             << path << "\": " << sf_strerror(file) << endl;
+	return false;
+    }
+
+    tmpFrames = (float *)malloc(info.frames * info.channels * sizeof(float));
+    if (!tmpFrames) {
+        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * info.channels * sizeof(float) << ") failed" << endl;
+        return false;
+    }
+
+    sf_readf_float(file, tmpFrames, info.frames);
+    sf_close(file);
+
+    m_clipData = (float *)malloc(info.frames * sizeof(float));
+    if (!m_clipData) {
+        cerr << "ClipMixer::loadClipData: malloc(" << info.frames * sizeof(float) << ") failed" << endl;
+	free(tmpFrames);
+	return false;
+    }
+
+    for (i = 0; i < info.frames; ++i) {
+	int j;
+	m_clipData[i] = 0.0f;
+	for (j = 0; j < info.channels; ++j) {
+	    m_clipData[i] += tmpFrames[i * info.channels + j];
+	}
+    }
+
+    free(tmpFrames);
+
+    m_clipLength = info.frames;
+    m_clipF0 = f0;
+    m_clipRate = info.samplerate;
+
+    return true;
+}
+
+void
+ClipMixer::reset()
+{
+    m_playing.clear();
+}
+
+float
+ClipMixer::getResampleRatioFor(float frequency)
+{
+    if (!m_clipData) return 1.0;
+    float pitchRatio = m_clipF0 / frequency;
+    float resampleRatio = m_sampleRate / m_clipRate;
+    return pitchRatio * resampleRatio;
+}
+
+int
+ClipMixer::getResampledClipDuration(float frequency)
+{
+    return int(ceil(m_clipLength * getResampleRatioFor(frequency)));
+}
+
+void
+ClipMixer::mix(float **toBuffers, 
+               float gain,
+               std::vector<NoteStart> newNotes, 
+               std::vector<NoteEnd> endingNotes)
+{
+    foreach (NoteStart note, newNotes) {
+        if (note.frequency > 20 && 
+            note.frequency < 5000) {
+            m_playing.push_back(note);
+        }
+    }
+
+    std::vector<NoteStart> remaining;
+
+    float *levels = new float[m_channels];
+
+    foreach (NoteStart note, m_playing) {
+
+        for (int c = 0; c < m_channels; ++c) {
+            levels[c] = gain;
+        }
+        if (note.pan != 0.0 && m_channels == 2) {
+            levels[0] *= 1.0 - note.pan;
+            levels[1] *= note.pan + 1.0;
+        }
+
+        int start = note.frameOffset;
+        int durationHere = m_blockSize;
+        if (start > 0) durationHere = m_blockSize - start;
+
+        bool ending = false;
+
+        foreach (NoteEnd end, endingNotes) {
+            if (end.frequency == note.frequency && 
+                end.frameOffset >= start &&
+                end.frameOffset <= m_blockSize) {
+                ending = true;
+                durationHere = end.frameOffset;
+                if (start > 0) durationHere = end.frameOffset - start;
+                break;
+            }
+        }
+
+        int clipDuration = getResampledClipDuration(note.frequency);
+        if (start + clipDuration > 0) {
+            if (start < 0 && start + clipDuration < durationHere) {
+                durationHere = start + clipDuration;
+            }
+            if (durationHere > 0) {
+                mixNote(toBuffers,
+                        levels,
+                        note.frequency,
+                        start < 0 ? -start : 0,
+                        start > 0 ?  start : 0,
+                        durationHere,
+                        ending);
+            }
+        }
+
+        if (!ending) {
+            NoteStart adjusted = note;
+            adjusted.frameOffset -= m_blockSize;
+            remaining.push_back(adjusted);
+        }
+    }
+
+    delete[] levels;
+
+    m_playing = remaining;
+}
+
+void
+ClipMixer::mixNote(float **toBuffers,
+                   float *levels,
+                   float frequency,
+                   int sourceOffset,
+                   int targetOffset,
+                   int sampleCount,
+                   bool isEnd)
+{
+    if (!m_clipData) return;
+
+    float ratio = getResampleRatioFor(frequency);
+    
+    float releaseTime = 0.01;
+    int releaseSampleCount = round(releaseTime * m_sampleRate);
+    if (releaseSampleCount > sampleCount) {
+        releaseSampleCount = sampleCount;
+    }
+    float releaseFraction = 1.f/releaseSampleCount;
+
+    for (int i = 0; i < sampleCount; ++i) {
+
+        int s = sourceOffset + i;
+
+        float os = s / ratio;
+        int osi = int(floor(os));
+
+        //!!! just linear interpolation for now (same as SV's sample
+        //!!! player). a small sinc kernel would be better and
+        //!!! probably "good enough"
+        float value = 0.f;
+        if (osi < m_clipLength) {
+            value += m_clipData[osi];
+        }
+        if (osi + 1 < m_clipLength) {
+            value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - osi);
+        }
+         
+        if (isEnd && i + releaseSampleCount > sampleCount) {
+            value *= releaseFraction * (sampleCount - i); // linear ramp for release
+        }
+
+        for (int c = 0; c < m_channels; ++c) {
+            toBuffers[c][targetOffset + i] += levels[c] * value;
+        }
+    }
+}
+
+
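The pitch shifting in ClipMixer::mixNote above is plain index-mapping with linear interpolation: output sample s reads the clip at position s / ratio, where ratio combines the pitch ratio (clip F0 / note frequency) with the sample-rate ratio (output rate / clip rate), as computed in getResampleRatioFor. A minimal standalone sketch of that read step, assuming a mono clip held in a std::vector; the helper name is hypothetical:

    #include <cmath>
    #include <vector>

    // Read one pitch-shifted, rate-converted sample from a mono clip using
    // linear interpolation, mirroring the index arithmetic in ClipMixer::mixNote.
    // ratio = (clipF0 / noteFrequency) * (outputRate / clipRate)
    static float resampleAt(const std::vector<float> &clip, int s, float ratio)
    {
        float os = s / ratio;                  // fractional read position in the clip
        int osi = int(std::floor(os));
        float value = 0.f;
        if (osi >= 0 && osi < (int)clip.size()) {
            value = clip[osi];
            if (osi + 1 < (int)clip.size()) {  // interpolate toward the next sample
                value += (clip[osi + 1] - clip[osi]) * (os - osi);
            }
        }
        return value;
    }
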
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/ClipMixer.h	Tue Jan 28 15:02:15 2014 +0000
@@ -0,0 +1,89 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    This file copyright 2006 Chris Cannam, 2006-2014 QMUL.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef CLIP_MIXER_H
+#define CLIP_MIXER_H
+
+#include <QString>
+#include <vector>
+
+/**
+ * Mix in synthetic notes produced by resampling a prerecorded
+ * clip. (i.e. this is an implementation of a digital sampler in the
+ * musician's sense.) This can mix any number of notes of arbitrary
+ * frequency, so long as they all use the same sample clip.
+ */
+
+class ClipMixer
+{
+public:
+    ClipMixer(int channels, int sampleRate, int blockSize);
+    ~ClipMixer();
+
+    void setChannelCount(int channels);
+
+    /**
+     * Load a sample clip from a wav file. This can only happen once:
+     * construct a new ClipMixer if you want a different clip.
+     */
+    bool loadClipData(QString clipFilePath, float clipF0);
+
+    void reset(); // discarding any playing notes
+
+    struct NoteStart {
+	int frameOffset; // within current processing block
+	float frequency; // Hz
+	float level; // volume in range (0,1]
+	float pan; // range [-1,1]
+    };
+
+    struct NoteEnd {
+	int frameOffset; // in current processing block
+        float frequency; // matching note start
+    };
+
+    void mix(float **toBuffers, 
+             float gain,
+	     std::vector<NoteStart> newNotes, 
+	     std::vector<NoteEnd> endingNotes);
+
+private:
+    int m_channels;
+    int m_sampleRate;
+    int m_blockSize;
+
+    QString m_clipPath;
+
+    float *m_clipData;
+    int m_clipLength;
+    float m_clipF0;
+    float m_clipRate;
+
+    std::vector<NoteStart> m_playing;
+
+    float getResampleRatioFor(float frequency);
+    int getResampledClipDuration(float frequency);
+
+    void mixNote(float **toBuffers, 
+                 float *levels,
+                 float frequency,
+                 int sourceOffset, // within resampled note
+                 int targetOffset, // within target buffer
+                 int sampleCount,
+                 bool isEnd);
+};
+
+
+#endif
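A minimal usage sketch of the interface above, assuming ClipMixer.h is included and a stereo, 44100 Hz, 1024-frame processing block; the clip path, reference pitch and note values are illustrative only:

    // Hypothetical caller: mix a single note into one processing block.
    ClipMixer mixer(2, 44100, 1024);             // channels, sample rate, block size
    if (mixer.loadClipData("/path/to/clip.wav", 261.63f)) {  // clip pitched near middle C

        float left[1024] = {0}, right[1024] = {0};
        float *buffers[2] = { left, right };

        ClipMixer::NoteStart start;
        start.frameOffset = 0;                   // note begins at the top of the block
        start.frequency = 440.f;                 // A4
        start.level = 0.8f;
        start.pan = 0.f;

        std::vector<ClipMixer::NoteStart> starts(1, start);
        std::vector<ClipMixer::NoteEnd> ends;    // no note ends within this block

        mixer.mix(buffers, 1.0f, starts, ends);  // adds the resampled clip into buffers
    }
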
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/ContinuousSynth.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -0,0 +1,149 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#include "ContinuousSynth.h"
+
+#include "base/Debug.h"
+#include "system/System.h"
+
+#include <cmath>
+
+ContinuousSynth::ContinuousSynth(int channels, int sampleRate, int blockSize, int waveType) :
+    m_channels(channels),
+    m_sampleRate(sampleRate),
+    m_blockSize(blockSize),
+    m_prevF0(-1.f),
+    m_phase(0.0),
+    m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square
+{
+}
+
+ContinuousSynth::~ContinuousSynth()
+{
+}
+
+void
+ContinuousSynth::reset()
+{
+    m_phase = 0;
+}
+
+void
+ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0)
+{
+    if (f0 == 0.f) f0 = m_prevF0;
+
+    bool wasOn = (m_prevF0 > 0.f);
+    bool nowOn = (f0 > 0.f);
+
+    if (!nowOn && !wasOn) {
+    m_phase = 0;
+    return;
+    }
+
+    int fadeLength = 100; // samples
+
+    float *levels = new float[m_channels];
+    
+    for (int c = 0; c < m_channels; ++c) {
+    levels[c] = gain * 0.5; // scale gain otherwise too loud compared to source
+    }
+    if (pan != 0.0 && m_channels == 2) {
+    levels[0] *= 1.0 - pan;
+    levels[1] *= pan + 1.0;
+    }
+
+//    cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl;
+
+    for (int i = 0; i < m_blockSize; ++i) {
+
+        double fHere = (nowOn ? f0 : m_prevF0);
+
+        if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) {
+            // interpolate the frequency shift
+            fHere = m_prevF0 + ((f0 - m_prevF0) * i) / fadeLength;
+        }
+
+        double phasor = (fHere * 2 * M_PI) / m_sampleRate;
+    
+        m_phase = m_phase + phasor;
+
+        int harmonics = (m_sampleRate / 4) / fHere - 1;
+        if (harmonics < 1) harmonics = 1;
+
+        switch (m_wavetype) {
+        case 1:
+            harmonics = 1;
+            break;
+        case 2:
+            break;
+        case 3:
+            break;
+        default:
+            harmonics = 3;
+            break;
+        }
+
+
+        for (int h = 0; h < harmonics; ++h) {
+
+            double v = 0;
+            double hn = 0;
+            double hp = 0;
+
+            switch (m_wavetype) {
+            case 1: // single sinusoid
+                v = sin(m_phase);
+                break;
+            case 2: // sawtooth
+                if (h != 0) {
+                    hn = h + 1;
+                    hp = m_phase * hn;
+                    v = -(1.0 / M_PI) * sin(hp) / hn;
+                } else {
+                    v = 0.5;
+                }
+                break;
+            case 3: // square
+                hn = h*2 + 1;
+                hp = m_phase * hn;
+                v = sin(hp) / hn;
+                break;
+            default: // 3 sinusoids
+                hn = h + 1;
+                hp = m_phase * hn;
+                v = sin(hp) / hn;
+                break;
+            }
+
+            if (!wasOn && i < fadeLength) {
+                // fade in
+                v = v * (i / double(fadeLength));
+            } else if (!nowOn) {
+                // fade out
+                if (i > fadeLength) v = 0;
+                else v = v * (1.0 - (i / double(fadeLength)));
+            }
+
+            for (int c = 0; c < m_channels; ++c) {
+                toBuffers[c][i] += levels[c] * v;
+            }
+        }
+    }    
+
+    m_prevF0 = f0;
+
+    delete[] levels;
+}
+
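The harmonic count computed in mix above band-limits the additive sawtooth and square waveforms: partials are only generated up to roughly a quarter of the sample rate. A small sketch of the same arithmetic, pulled out as a hypothetical helper for illustration:

    // Number of partials generated per block, as in ContinuousSynth::mix.
    // Example: at 44100 Hz and f0 = 440 Hz, (44100 / 4) / 440 - 1 gives 24 partials,
    // so the highest partial lies near 10.6 kHz, well below the Nyquist frequency.
    static int partialCount(int sampleRate, double f0, int waveType)
    {
        int harmonics = int((sampleRate / 4) / f0 - 1);
        if (harmonics < 1) harmonics = 1;
        switch (waveType) {
        case 1: harmonics = 1; break;    // single sinusoid
        case 2: case 3: break;           // sawtooth / square keep the band-limited count
        default: harmonics = 3; break;   // default wave type: 3 sinusoids
        }
        return harmonics;
    }
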
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/audioio/ContinuousSynth.h	Tue Jan 28 15:02:15 2014 +0000
@@ -0,0 +1,63 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*
+    Sonic Visualiser
+    An audio file viewer and annotation editor.
+    Centre for Digital Music, Queen Mary, University of London.
+    
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License as
+    published by the Free Software Foundation; either version 2 of the
+    License, or (at your option) any later version.  See the file
+    COPYING included with this distribution for more information.
+*/
+
+#ifndef CONTINUOUS_SYNTH_H
+#define CONTINUOUS_SYNTH_H
+
+/**
+ * Mix into a target buffer a signal synthesised so as to sound at a
+ * specific frequency. The frequency may change with each processing
+ * block, or may be switched on or off.
+ */
+
+class ContinuousSynth
+{
+public:
+    ContinuousSynth(int channels, int sampleRate, int blockSize, int waveType);
+    ~ContinuousSynth();
+    
+    void setChannelCount(int channels);
+
+    void reset();
+
+    /**
+     * Mix in a signal to be heard at the given fundamental
+     * frequency. Any oscillator state will be maintained between
+     * process calls so as to provide a continuous sound. The f0 value
+     * may vary between calls.
+     *
+     * Supply f0 equal to 0 if you want to maintain the f0 from the
+     * previous block (without having to remember what it was).
+     *
+     * Supply f0 less than 0 for silence. You should continue to call
+     * this even when the signal is silent if you want to ensure the
+     * sound switches on and off cleanly.
+     */
+    void mix(float **toBuffers,
+         float gain,
+         float pan,
+         float f0);
+
+private:
+    int m_channels;
+    int m_sampleRate;
+    int m_blockSize;
+
+    double m_prevF0;
+    double m_phase;
+
+    int m_wavetype;
+};
+
+#endif
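A minimal usage sketch of the f0 conventions documented above, assuming ContinuousSynth.h is included; block size, gain and frequencies are illustrative, and in real use the buffers would be consumed and re-zeroed between calls:

    // Hypothetical caller: drive the synth across three consecutive blocks.
    ContinuousSynth synth(2, 44100, 1024, 0);   // channels, rate, block size, wave type
    float left[1024] = {0}, right[1024] = {0};
    float *buffers[2] = { left, right };

    synth.mix(buffers, 1.0f, 0.f, 440.f);       // sound at 440 Hz
    synth.mix(buffers, 1.0f, 0.f, 0.f);         // f0 == 0: hold the previous frequency
    synth.mix(buffers, 1.0f, 0.f, -1.f);        // f0 < 0: fade out to silence cleanly
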
--- a/audioio/audioio.pro	Thu Dec 12 15:20:14 2013 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-TEMPLATE = lib
-
-SV_UNIT_PACKAGES = fftw3f samplerate jack portaudio-2.0 libpulse rubberband
-load(../prf/sv.prf)
-
-CONFIG += sv staticlib qt thread warn_on stl rtti exceptions
-QT -= gui
-
-TARGET = svaudioio
-
-DEPENDPATH += ..
-INCLUDEPATH += . ..
-OBJECTS_DIR = tmp_obj
-MOC_DIR = tmp_moc
-
-HEADERS += AudioCallbackPlaySource.h \
-           AudioCallbackPlayTarget.h \
-           AudioCoreAudioTarget.h \
-           AudioGenerator.h \
-           AudioJACKTarget.h \
-           AudioPortAudioTarget.h \
-           AudioPulseAudioTarget.h \
-           AudioTargetFactory.h \
-           PlaySpeedRangeMapper.h
-SOURCES += AudioCallbackPlaySource.cpp \
-           AudioCallbackPlayTarget.cpp \
-           AudioCoreAudioTarget.cpp \
-           AudioGenerator.cpp \
-           AudioJACKTarget.cpp \
-           AudioPortAudioTarget.cpp \
-           AudioPulseAudioTarget.cpp \
-           AudioTargetFactory.cpp \
-           PlaySpeedRangeMapper.cpp
--- a/framework/Document.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/framework/Document.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -19,6 +19,8 @@
 #include "data/model/WritableWaveFileModel.h"
 #include "data/model/DenseThreeDimensionalModel.h"
 #include "data/model/DenseTimeValueModel.h"
+#include "data/model/FlexiNoteModel.h"
+
 #include "layer/Layer.h"
 #include "widgets/CommandHistory.h"
 #include "base/Command.h"
@@ -27,6 +29,7 @@
 #include "base/PlayParameters.h"
 #include "transform/TransformFactory.h"
 #include "transform/ModelTransformerFactory.h"
+#include "transform/FeatureExtractionModelTransformer.h"
 #include <QApplication>
 #include <QTextStream>
 #include <QSettings>
@@ -38,6 +41,8 @@
 #include "data/model/SparseTimeValueModel.h"
 #include "data/model/AlignmentModel.h"
 
+using std::vector;
+
 //#define DEBUG_DOCUMENT 1
 
 //!!! still need to handle command history, documentRestored/documentModified
@@ -207,56 +212,80 @@
 Document::createDerivedLayer(const Transform &transform,
                              const ModelTransformer::Input &input)
 {
+    Transforms transforms;
+    transforms.push_back(transform);
+    vector<Layer *> layers = createDerivedLayers(transforms, input);
+    if (layers.empty()) return 0;
+    else return layers[0];
+}
+
+vector<Layer *>
+Document::createDerivedLayers(const Transforms &transforms,
+                              const ModelTransformer::Input &input)
+{
     QString message;
-    Model *newModel = addDerivedModel(transform, input, message);
-    if (!newModel) {
-        emit modelGenerationFailed(transform.getIdentifier(), message);
-        return 0;
+    vector<Model *> newModels = addDerivedModels(transforms, input, message);
+
+    if (newModels.empty()) {
+        //!!! This identifier may be wrong!
+        emit modelGenerationFailed(transforms[0].getIdentifier(), message);
+        return vector<Layer *>();
     } else if (message != "") {
-        emit modelGenerationWarning(transform.getIdentifier(), message);
+        //!!! This identifier may be wrong!
+        emit modelGenerationWarning(transforms[0].getIdentifier(), message);
     }
 
-    LayerFactory::LayerTypeSet types =
-	LayerFactory::getInstance()->getValidLayerTypes(newModel);
+    vector<Layer *> layers;
 
-    if (types.empty()) {
-	cerr << "WARNING: Document::createLayerForTransformer: no valid display layer for output of transform " << transform.getIdentifier() << endl;
-        newModel->aboutToDelete();
-        emit modelAboutToBeDeleted(newModel);
-        m_models.erase(newModel);
-	delete newModel;
-	return 0;
+    for (int i = 0; i < (int)newModels.size(); ++i) {
+
+        Model *newModel = newModels[i];
+
+        LayerFactory::LayerTypeSet types =
+            LayerFactory::getInstance()->getValidLayerTypes(newModel);
+
+        if (types.empty()) {
+            cerr << "WARNING: Document::createLayerForTransformer: no valid display layer for output of transform " << transforms[i].getIdentifier() << endl;
+            //!!! inadequate cleanup:
+            newModel->aboutToDelete();
+            emit modelAboutToBeDeleted(newModel);
+            m_models.erase(newModel);
+            delete newModel;
+            return vector<Layer *>();
+        }
+
+        //!!! for now, just use the first suitable layer type
+
+        Layer *newLayer = createLayer(*types.begin());
+        setModel(newLayer, newModel);
+
+        //!!! We need to clone the model when adding the layer, so that it
+        //can be edited without affecting other layers that are based on
+        //the same model.  Unfortunately we can't just clone it now,
+        //because it probably hasn't been completed yet -- the transform
+        //runs in the background.  Maybe the transform has to handle
+        //cloning and cacheing models itself.
+        //
+        // Once we do clone models here, of course, we'll have to avoid
+        // leaking them too.
+        //
+        // We want the user to be able to add a model to a second layer
+        // _while it's still being calculated in the first_ and have it
+        // work quickly.  That means we need to put the same physical
+        // model pointer in both layers, so they can't actually be cloned.
+    
+        if (newLayer) {
+            newLayer->setObjectName(getUniqueLayerName
+                                    (TransformFactory::getInstance()->
+                                     getTransformFriendlyName
+                                     (transforms[i].getIdentifier())));
+        }
+
+        emit layerAdded(newLayer);
+        layers.push_back(newLayer);
     }
 
-    //!!! for now, just use the first suitable layer type
-
-    Layer *newLayer = createLayer(*types.begin());
-    setModel(newLayer, newModel);
-
-    //!!! We need to clone the model when adding the layer, so that it
-    //can be edited without affecting other layers that are based on
-    //the same model.  Unfortunately we can't just clone it now,
-    //because it probably hasn't been completed yet -- the transform
-    //runs in the background.  Maybe the transform has to handle
-    //cloning and cacheing models itself.
-    //
-    // Once we do clone models here, of course, we'll have to avoid
-    // leaking them too.
-    //
-    // We want the user to be able to add a model to a second layer
-    // _while it's still being calculated in the first_ and have it
-    // work quickly.  That means we need to put the same physical
-    // model pointer in both layers, so they can't actually be cloned.
-    
-    if (newLayer) {
-	newLayer->setObjectName(getUniqueLayerName
-                                (TransformFactory::getInstance()->
-                                 getTransformFriendlyName
-                                 (transform.getIdentifier())));
-    }
-
-    emit layerAdded(newLayer);
-    return newLayer;
+    return layers;
 }
 
 void
@@ -503,40 +532,58 @@
                           const ModelTransformer::Input &input,
                           QString &message)
 {
-    Model *model = 0;
-
     for (ModelMap::iterator i = m_models.begin(); i != m_models.end(); ++i) {
-	if (i->second.transform == transform &&
-	    i->second.source == input.getModel() && 
+        if (i->second.transform == transform &&
+            i->second.source == input.getModel() && 
             i->second.channel == input.getChannel()) {
-	    return i->first;
-	}
+            std::cerr << "derived model taken from map " << std::endl;
+            return i->first;
+        }
     }
 
-    model = ModelTransformerFactory::getInstance()->transform
-        (transform, input, message);
+    Transforms tt;
+    tt.push_back(transform);
+    vector<Model *> mm = addDerivedModels(tt, input, message);
+    if (mm.empty()) return 0;
+    else return mm[0];
+}
 
-    // The transform we actually used was presumably identical to the
-    // one asked for, except that the version of the plugin may
-    // differ.  It's possible that the returned message contains a
-    // warning about this; that doesn't concern us here, but we do
-    // need to ensure that the transform we remember is correct for
-    // what was actually applied, with the current plugin version.
+vector<Model *>
+Document::addDerivedModels(const Transforms &transforms,
+                           const ModelTransformer::Input &input,
+                           QString &message)
+{
+    vector<Model *> mm = 
+        ModelTransformerFactory::getInstance()->transformMultiple
+        (transforms, input, message);
 
-    Transform applied = transform;
-    applied.setPluginVersion
-        (TransformFactory::getInstance()->
-         getDefaultTransformFor(transform.getIdentifier(),
-                                lrintf(transform.getSampleRate()))
-         .getPluginVersion());
+    for (int j = 0; j < (int)mm.size(); ++j) {
 
-    if (!model) {
-	cerr << "WARNING: Document::addDerivedModel: no output model for transform " << transform.getIdentifier() << endl;
-    } else {
-	addDerivedModel(applied, input, model);
+        Model *model = mm[j];
+
+        // The transform we actually used was presumably identical to
+        // the one asked for, except that the version of the plugin
+        // may differ.  It's possible that the returned message
+        // contains a warning about this; that doesn't concern us
+        // here, but we do need to ensure that the transform we
+        // remember is correct for what was actually applied, with the
+        // current plugin version.
+
+        Transform applied = transforms[j];
+        applied.setPluginVersion
+            (TransformFactory::getInstance()->
+             getDefaultTransformFor(applied.getIdentifier(),
+                                    lrintf(applied.getSampleRate()))
+             .getPluginVersion());
+
+        if (!model) {
+            cerr << "WARNING: Document::addDerivedModel: no output model for transform " << applied.getIdentifier() << endl;
+        } else {
+            addDerivedModel(applied, input, model);
+        }
     }
-
-    return model;
+
+    return mm;
 }
 
 void
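The batch form returns one model per requested transform, in order, and records each applied transform with the plugin version actually used. A hedged sketch of calling it directly; the variable names and the failure handling are illustrative assumptions:

    // Sketch only -- calling Document::addDerivedModels directly.
    QString message;
    std::vector<Model *> outputs =
        doc->addDerivedModels(transforms,
                              ModelTransformer::Input(sourceModel, -1),
                              message);
    for (int i = 0; i < (int)outputs.size(); ++i) {
        // outputs[i] corresponds to transforms[i]; a null entry means the
        // transformer produced no model for that output (a warning is logged)
        if (!outputs[i] && message != "") {
            std::cerr << "Transform failed: " << message.toStdString() << std::endl;
        }
    }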
@@ -690,6 +737,7 @@
     }
 
     LayerFactory::getInstance()->setModel(layer, model);
+    // std::cerr << "layer type: " << LayerFactory::getInstance()->getLayerTypeName(LayerFactory::getInstance()->getLayerType(layer)) << std::endl;
 
     if (previousModel) {
         releaseModel(previousModel);
--- a/framework/Document.h	Thu Dec 12 15:20:14 2013 +0000
+++ b/framework/Document.h	Tue Jan 28 15:02:15 2014 +0000
@@ -19,6 +19,7 @@
 #include "layer/LayerFactory.h"
 #include "transform/Transform.h"
 #include "transform/ModelTransformer.h"
+#include "transform/FeatureExtractionModelTransformer.h"
 #include "base/Command.h"
 
 #include <map>
@@ -117,6 +118,15 @@
                               const ModelTransformer::Input &);
 
     /**
+     * Create and return suitable layers for the given transforms,
+     * which must be identical apart from the output (i.e. must use
+     * the same plugin and configuration). The layers are returned in
+     * the same order as the transforms are supplied.
+     */
+    std::vector<Layer *> createDerivedLayers(const Transforms &,
+                                             const ModelTransformer::Input &);
+
+    /**
      * Delete the given layer, and also its associated model if no
      * longer used by any other layer.  In general, this should be the
      * only method used to delete layers -- doing so directly is a bit
@@ -154,6 +164,15 @@
                            QString &returnedMessage);
 
     /**
+     * Add derived models associated with the given set of related
+     * transforms, running the transforms and returning the resulting
+     * models.
+     */
+    std::vector<Model *> addDerivedModels(const Transforms &transforms,
+                                          const ModelTransformer::Input &input,
+                                          QString &returnedMessage);
+
+    /**
      * Add a derived model associated with the given transform.  This
      * is necessary to register any derived model that was not created
      * by the document using createDerivedModel or createDerivedLayer.
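A minimal sketch of building a Transforms list that satisfies the "identical apart from the output" contract documented above; the plugin identifier, output name, and sample rate are placeholders, not a real plugin:

    // Sketch only -- two transforms differing only in their output.
    TransformFactory *tf = TransformFactory::getInstance();
    Transform t1 = tf->getDefaultTransformFor
        ("vamp:example-plugin:example:pitch", 44100);   // placeholder id
    Transform t2 = t1;                // copy keeps plugin and configuration
    t2.setOutput("notes");            // change only the output
    Transforms transforms;
    transforms.push_back(t1);
    transforms.push_back(t2);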
--- a/framework/MainWindowBase.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/framework/MainWindowBase.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -22,6 +22,7 @@
 #include "data/model/WaveFileModel.h"
 #include "data/model/SparseOneDimensionalModel.h"
 #include "data/model/NoteModel.h"
+#include "data/model/FlexiNoteModel.h"
 #include "data/model/Labeller.h"
 #include "data/model/TabularModel.h"
 #include "view/ViewManager.h"
@@ -35,6 +36,7 @@
 #include "layer/SliceableLayer.h"
 #include "layer/ImageLayer.h"
 #include "layer/NoteLayer.h"
+#include "layer/FlexiNoteLayer.h"
 #include "layer/RegionLayer.h"
 
 #include "widgets/ListInputDialog.h"
@@ -309,6 +311,10 @@
         return ff->getOpenFileName(type, m_sessionFile);
     case FileFinder::LayerFileNoMidi:
         return ff->getOpenFileName(type, m_sessionFile);
+    case FileFinder::LayerFileNonSV:
+        return ff->getOpenFileName(type, m_sessionFile);
+    case FileFinder::LayerFileNoMidiNonSV:
+        return ff->getOpenFileName(type, m_sessionFile);
     case FileFinder::SessionOrAudioFile:
         return ff->getOpenFileName(type, m_sessionFile);
     case FileFinder::ImageFile:
@@ -341,6 +347,10 @@
         return ff->getSaveFileName(type, m_sessionFile);
     case FileFinder::LayerFileNoMidi:
         return ff->getSaveFileName(type, m_sessionFile);
+    case FileFinder::LayerFileNonSV:
+        return ff->getSaveFileName(type, m_sessionFile);
+    case FileFinder::LayerFileNoMidiNonSV:
+        return ff->getSaveFileName(type, m_sessionFile);
     case FileFinder::SessionOrAudioFile:
         return ff->getSaveFileName(type, m_sessionFile);
     case FileFinder::ImageFile:
@@ -430,6 +440,7 @@
     bool haveCurrentDurationLayer = 
 	(haveCurrentLayer &&
 	 (dynamic_cast<NoteLayer *>(currentLayer) ||
+	  dynamic_cast<FlexiNoteLayer *>(currentLayer) ||
           dynamic_cast<RegionLayer *>(currentLayer)));
     bool haveCurrentColour3DPlot =
         (haveCurrentLayer &&
@@ -1015,6 +1026,25 @@
         CommandHistory::getInstance()->addCommand(c, false);
         return;
     }
+
+    FlexiNoteModel *fnm = dynamic_cast<FlexiNoteModel *>(layer->getModel());
+    if (fnm) {
+        FlexiNoteModel::Point point(alignedStart,
+                                    fnm->getValueMinimum(),
+                                    alignedDuration,
+                                    1.f,
+                                    "");
+        FlexiNoteModel::EditCommand *command =
+            new FlexiNoteModel::EditCommand(fnm, tr("Add Point"));
+        command->addPoint(point);
+        command->setName(name);
+        c = command->finish();
+    }
+
+    if (c) {
+        CommandHistory::getInstance()->addCommand(c, false);
+        return;
+    }
 }
 
 void
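The EditCommand mechanism used above works the same way in reverse, so a removal is also undoable. A hedged sketch, assuming the fnm and point variables from the block above and deletePoint as the counterpart of addPoint in the same EditCommand API:

    // Sketch only -- undoable removal of the same point.
    FlexiNoteModel::EditCommand *remove =
        new FlexiNoteModel::EditCommand(fnm, tr("Remove Point"));
    remove->deletePoint(point);
    if (Command *rc = remove->finish()) {
        // finish() returns 0 if the command ended up empty; the edit has
        // already been applied, so register it without re-executing
        CommandHistory::getInstance()->addCommand(rc, false);
    }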
@@ -2229,7 +2259,11 @@
 MainWindowBase::zoomDefault()
 {
     Pane *currentPane = m_paneStack->getCurrentPane();
-    if (currentPane) currentPane->setZoomLevel(1024);
+    QSettings settings;
+    settings.beginGroup("MainWindow");
+    int zoom = settings.value("zoom-default", 1024).toInt();
+    settings.endGroup();
+    if (currentPane) currentPane->setZoomLevel(zoom);
 }
 
 void
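zoomDefault() now reads its level from QSettings instead of the previous hard-coded 1024. For reference, a hedged sketch of the write side; the key and group match the code above, but where the value is written from (for example a preferences dialog) is an assumption, not part of this changeset:

    // Sketch only -- storing a different default zoom for zoomDefault().
    QSettings settings;
    settings.beginGroup("MainWindow");
    settings.setValue("zoom-default", 2048);   // replaces the old fixed 1024
    settings.endGroup();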
@@ -3091,6 +3125,7 @@
 MainWindowBase::modelAdded(Model *model)
 {
 //    SVDEBUG << "MainWindowBase::modelAdded(" << model << ")" << endl;
+    std::cerr << "MainWindowBase::modelAdded: Adding model " << model->getTypeName() << " to play source" << std::endl;
     m_playSource->addModel(model);
 }
 
--- a/framework/SVFileReader.cpp	Thu Dec 12 15:20:14 2013 +0000
+++ b/framework/SVFileReader.cpp	Tue Jan 28 15:02:15 2014 +0000
@@ -1271,8 +1271,8 @@
         float gain = attributes.value("gain").toFloat(&ok);
         if (ok) parameters->setPlayGain(gain);
         
-        QString pluginId = attributes.value("pluginId");
-        if (pluginId != "") parameters->setPlayPluginId(pluginId);
+        QString clipId = attributes.value("clipId");
+        if (clipId != "") parameters->setPlayClipId(clipId);
         
         m_currentPlayParameters = parameters;
 
@@ -1291,17 +1291,26 @@
 bool
 SVFileReader::readPlugin(const QXmlAttributes &attributes)
 {
-    if (m_currentDerivedModelId < 0 && !m_currentPlayParameters) {
+    if (m_currentDerivedModelId >= 0) {
+        return readPluginForTransform(attributes);
+    } else if (m_currentPlayParameters) {
+        return readPluginForPlayback(attributes);
+    } else {
         cerr << "WARNING: SV-XML: Plugin found outside derivation or play parameters" << endl;
         return false;
     }
+}
 
-    if (!m_currentPlayParameters && m_currentTransformIsNewStyle) {
+bool
+SVFileReader::readPluginForTransform(const QXmlAttributes &attributes)
+{
+    if (m_currentTransformIsNewStyle) {
+        // Not needed, we have the transform element instead
         return true;
     }
 
     QString configurationXml = "<plugin";
-    
+
     for (int i = 0; i < attributes.length(); ++i) {
         configurationXml += QString(" %1=\"%2\"")
             .arg(attributes.qName(i))
@@ -1310,12 +1319,21 @@
 
     configurationXml += "/>";
 
-    if (m_currentPlayParameters) {
-        m_currentPlayParameters->setPlayPluginConfiguration(configurationXml);
-    } else {
-        TransformFactory::getInstance()->
-            setParametersFromPluginConfigurationXml(m_currentTransform,
-                                                    configurationXml);
+    TransformFactory::getInstance()->
+        setParametersFromPluginConfigurationXml(m_currentTransform,
+                                                configurationXml);
+    return true;
+}
+
+bool
+SVFileReader::readPluginForPlayback(const QXmlAttributes &attributes)
+{
+    // Obsolete but supported for compatibility
+
+    QString ident = attributes.value("identifier");
+    if (ident == "sample_player") {
+        QString clipId = attributes.value("program");
+        if (clipId != "") m_currentPlayParameters->setPlayClipId(clipId);
     }
 
     return true;
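For orientation, the two session-file forms that now reach setPlayClipId(); the XML fragments and the "organ" clip name are illustrative, and other playparameters attributes are omitted:

    // Sketch only -- both element forms end at the same call.
    //
    // New style (read directly in readPlayParameters):
    //   <playparameters ... clipId="organ"/>
    //
    // Old style (readPluginForPlayback, kept for compatibility):
    //   <playparameters ...>
    //     <plugin identifier="sample_player" program="organ"/>
    //   </playparameters>
    //
    // Either way the effect is:
    m_currentPlayParameters->setPlayClipId("organ");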
--- a/framework/SVFileReader.h	Thu Dec 12 15:20:14 2013 +0000
+++ b/framework/SVFileReader.h	Tue Jan 28 15:02:15 2014 +0000
@@ -222,6 +222,8 @@
     bool readDerivation(const QXmlAttributes &);
     bool readPlayParameters(const QXmlAttributes &);
     bool readPlugin(const QXmlAttributes &);
+    bool readPluginForTransform(const QXmlAttributes &);
+    bool readPluginForPlayback(const QXmlAttributes &);
     bool readTransform(const QXmlAttributes &);
     bool readParameter(const QXmlAttributes &);
     bool readSelection(const QXmlAttributes &);
--- a/framework/framework.pro	Thu Dec 12 15:20:14 2013 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-TEMPLATE = lib
-
-SV_UNIT_PACKAGES = vamp vamp-hostsdk # required because we use transform headers
-load(../prf/sv.prf)
-
-CONFIG += sv staticlib qt thread warn_on stl rtti exceptions
-QT += xml network
-
-TARGET = svframework
-
-DEPENDPATH += ..
-INCLUDEPATH += . ..
-OBJECTS_DIR = tmp_obj
-MOC_DIR = tmp_moc
-
-HEADERS += Document.h \
-           MainWindowBase.h \
-           SVFileReader.h \
-           VersionTester.h
-
-SOURCES += Document.cpp \
-           MainWindowBase.cpp \
-           SVFileReader.cpp \
-           VersionTester.cpp
-
--- a/svapp.pro	Thu Dec 12 15:20:14 2013 +0000
+++ b/svapp.pro	Tue Jan 28 15:02:15 2014 +0000
@@ -4,10 +4,26 @@
 exists(config.pri) {
     include(config.pri)
 }
-win* {
-    !exists(config.pri) {
+!exists(config.pri) {
+    win32-g++ {
+        INCLUDEPATH += ../sv-dependency-builds/win32-mingw/include
+        LIBS += -L../sv-dependency-builds/win32-mingw/lib
+    }
+    win32-msvc* {
+        INCLUDEPATH += ../sv-dependency-builds/win32-msvc/include
+        LIBS += -L../sv-dependency-builds/win32-msvc/lib
+    }
+    macx* {
+        INCLUDEPATH += ../sv-dependency-builds/osx/include
+        LIBS += -L../sv-dependency-builds/osx/lib
+    }
+
+    win* {
         DEFINES += HAVE_PORTAUDIO_2_0
     }
+    macx* {
+        DEFINES += HAVE_COREAUDIO HAVE_PORTAUDIO_2_0
+    }
 }
 
 CONFIG += staticlib qt thread warn_on stl rtti exceptions
@@ -20,30 +36,26 @@
 OBJECTS_DIR = o
 MOC_DIR = o
 
-win32-g++ {
-    INCLUDEPATH += ../sv-dependency-builds/win32-mingw/include
-}
-win32-msvc* {
-    INCLUDEPATH += ../sv-dependency-builds/win32-msvc/include
-}
-
 HEADERS += audioio/AudioCallbackPlaySource.h \
            audioio/AudioCallbackPlayTarget.h \
-           audioio/AudioCoreAudioTarget.h \
            audioio/AudioGenerator.h \
            audioio/AudioJACKTarget.h \
            audioio/AudioPortAudioTarget.h \
            audioio/AudioPulseAudioTarget.h \
            audioio/AudioTargetFactory.h \
+           audioio/ClipMixer.h \
+           audioio/ContinuousSynth.h \
            audioio/PlaySpeedRangeMapper.h
+
 SOURCES += audioio/AudioCallbackPlaySource.cpp \
            audioio/AudioCallbackPlayTarget.cpp \
-           audioio/AudioCoreAudioTarget.cpp \
            audioio/AudioGenerator.cpp \
            audioio/AudioJACKTarget.cpp \
            audioio/AudioPortAudioTarget.cpp \
            audioio/AudioPulseAudioTarget.cpp \
            audioio/AudioTargetFactory.cpp \
+           audioio/ClipMixer.cpp \
+           audioio/ContinuousSynth.cpp \
            audioio/PlaySpeedRangeMapper.cpp
 
 HEADERS += framework/Document.h \