changeset 239:4d1501b27075 integration_library

Merge from branch "sonification"
author mathieub <mathieu.barthet@eecs.qmul.ac.uk>
date Mon, 25 Jul 2011 17:57:59 +0100
parents 1ebd8e13262d (current diff) 1fcee2a1c03e
children e8961a95b3d6
diffstat 4 files changed, 231 insertions(+), 209 deletions(-)
--- a/audioio/AudioCallbackPlaySource.cpp	Wed Jun 29 09:57:36 2011 +0100
+++ b/audioio/AudioCallbackPlaySource.cpp	Mon Jul 25 17:57:59 2011 +0100
@@ -24,7 +24,9 @@
 #include "data/model/DenseTimeValueModel.h"
 #include "data/model/WaveFileModel.h"
 #include "data/model/SparseOneDimensionalModel.h"
+#include "data/model/NoteModel.h"
 #include "plugin/RealTimePluginInstance.h"
+#include "base/Debug.h"
 
 #include "AudioCallbackPlayTarget.h"
 
@@ -69,6 +71,8 @@
     m_auditioningPluginBypassed(false),
     m_playStartFrame(0),
     m_playStartFramePassed(false),
+    m_exampleNotes(0),
+    m_examplePlaybackFrame(0),
     m_timeStretcher(0),
     m_monoStretcher(0),
     m_stretchRatio(1.0),
@@ -115,6 +119,8 @@
     }
 
     clearModels();
+
+    delete m_exampleNotes;
     
     if (m_readBuffers != m_writeBuffers) {
 	delete m_readBuffers;
@@ -990,6 +996,39 @@
 }
 
 void
+AudioCallbackPlaySource::queueExampleNote(int midiPitch)
+{
+    SVDEBUG << "AudioCallbackPlaySource::queueExampleNote " << midiPitch << endl;
+    
+    size_t rate = getTargetSampleRate();
+    if (!rate) return;
+
+    Note n(m_examplePlaybackFrame,
+           midiPitch,
+           rate / 2, // half a second
+           0,
+           "");
+
+    NoteModel *newNoteModel = 0;
+
+    if (!m_exampleNotes) {
+        // do this outside mutex -- adding the playable and the model
+        // both call back on us into functions that need to lock
+        newNoteModel = new NoteModel(rate, 1, false);
+        PlayParameterRepository::getInstance()->addPlayable(newNoteModel);
+        m_audioGenerator->addModel(newNoteModel);
+        m_exampleNotes = newNoteModel;
+    }
+
+    m_mutex.lock();
+    m_exampleNotes->addPoint(n);
+    m_mutex.unlock();
+
+    SVDEBUG << "AudioCallbackPlaySource::queueExampleNote: Added note at frame "
+            << n.frame << endl;
+}
+
+void
 AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
 {
     m_audioGenerator->setSoloModelSet(s);
@@ -1062,6 +1101,52 @@
 }
 
 size_t
+AudioCallbackPlaySource::mixExampleModel(size_t count, float **buffer)
+{
+    SVDEBUG << "AudioCallbackPlaySource::mixExampleModel" << endl;
+
+    if (!m_exampleNotes || m_exampleNotes->isEmpty()) {
+        return 0;
+    }
+
+    SVDEBUG << "AudioCallbackPlaySource::mixExampleModel: Model non-empty; m_examplePlaybackFrame is " << m_examplePlaybackFrame << " and count " << count << endl;
+
+    QMutexLocker locker(&m_mutex);
+
+    size_t n = 0;
+
+    n = m_audioGenerator->mixModel(m_exampleNotes,
+                                   m_examplePlaybackFrame,
+                                   count,
+                                   buffer,
+                                   0,
+                                   0);
+
+    m_examplePlaybackFrame += n;
+
+    // prune notes that have finished
+    while (1) {
+        const NoteModel::PointList &points = m_exampleNotes->getPoints();
+        if (!points.empty()) {
+            NoteModel::Point p(*points.begin());
+            if (p.frame + p.duration < m_examplePlaybackFrame) {
+                m_exampleNotes->deletePoint(p);
+                continue;
+            }
+        }
+        break;
+    }
+
+    SVDEBUG << "AudioCallbackPlaySource::mixExampleModel: done, got "
+            << n << " frames for new m_examplePlaybackFrame of "
+            << m_examplePlaybackFrame << ", "
+            << m_exampleNotes->getPoints().size() << " queued notes remain"
+            << endl;
+
+    return n;
+}
+
+size_t
 AudioCallbackPlaySource::getSourceSamples(size_t ucount, float **buffer)
 {
     int count = ucount;
@@ -1075,7 +1160,7 @@
 		buffer[ch][i] = 0.0;
 	    }
 	}
-	return 0;
+	return mixExampleModel(ucount, buffer);
     }
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
@@ -1180,6 +1265,8 @@
 #endif
 
         m_condition.wakeAll();
+        
+        (void)mixExampleModel(got, buffer);
 
 	return got;
     }
@@ -1273,6 +1360,8 @@
 
     m_condition.wakeAll();
 
+    (void)mixExampleModel(count, buffer);
+
     return count;
 }
 
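The AudioCallbackPlaySource changes above add queueExampleNote(), which appends a short note (half a second, i.e. rate / 2 frames) to a private NoteModel, and mixExampleModel(), which mixes those queued notes into the getSourceSamples() output and then prunes notes whose end has already passed the example playback frame. The following is a minimal standalone sketch of that queue-and-prune idea, using hypothetical ExampleNote and ExampleQueue types invented for illustration; the real code additionally guards the model with a mutex and renders the audio through AudioGenerator::mixModel().

// Standalone C++ sketch (hypothetical types, not the SV classes):
// the queue-and-prune bookkeeping used by queueExampleNote/mixExampleModel.
#include <cstddef>
#include <deque>
#include <iostream>

struct ExampleNote {
    std::size_t frame;     // start frame of the note
    std::size_t duration;  // length in frames
    int pitch;             // MIDI pitch
};

class ExampleQueue {
public:
    explicit ExampleQueue(std::size_t sampleRate) :
        m_rate(sampleRate), m_playbackFrame(0) { }

    // Queue a half-second note starting at the current example playback
    // frame, mirroring the duration chosen in queueExampleNote().
    void queueNote(int midiPitch) {
        m_notes.push_back({ m_playbackFrame, m_rate / 2, midiPitch });
    }

    // Advance by one callback block and drop notes that have finished,
    // as mixExampleModel() does after mixing.
    void advance(std::size_t count) {
        m_playbackFrame += count;
        while (!m_notes.empty() &&
               m_notes.front().frame + m_notes.front().duration
                   < m_playbackFrame) {
            m_notes.pop_front();
        }
    }

    std::size_t pending() const { return m_notes.size(); }

private:
    std::size_t m_rate;
    std::size_t m_playbackFrame;
    std::deque<ExampleNote> m_notes;  // queued in frame order
};

int main() {
    ExampleQueue q(44100);
    q.queueNote(60);                    // queue middle C
    q.advance(1024);                    // one audio block: still sounding
    std::cout << q.pending() << "\n";   // prints 1
    q.advance(44100);                   // well past the half-second mark
    std::cout << q.pending() << "\n";   // prints 0
    return 0;
}
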
--- a/audioio/AudioCallbackPlaySource.h	Wed Jun 29 09:57:36 2011 +0100
+++ b/audioio/AudioCallbackPlaySource.h	Mon Jul 25 17:57:59 2011 +0100
@@ -38,6 +38,7 @@
 }
 
 class Model;
+class NoteModel;
 class ViewManagerBase;
 class AudioGenerator;
 class PlayParameters;
@@ -227,6 +228,12 @@
     void setAuditioningEffect(Auditionable *plugin);
 
     /**
+     * Request spontaneous playback of a single short note of the
+     * given pitch.
+     */
+    void queueExampleNote(int midiPitch);
+
+    /**
      * Specify that only the given set of models should be played.
      */
     void setSoloModelSet(std::set<Model *>s);
@@ -305,6 +312,8 @@
     size_t                            m_playStartFrame;
     bool                              m_playStartFramePassed;
     RealTime                          m_playStartedAt;
+    NoteModel                        *m_exampleNotes;
+    size_t                            m_examplePlaybackFrame;
 
     RingBuffer<float> *getWriteRingBuffer(size_t c) {
 	if (m_writeBuffers && c < m_writeBuffers->size()) {
@@ -345,6 +354,11 @@
     // frame argument passed in, in the case of looping).
     size_t mixModels(size_t &frame, size_t count, float **buffers);
 
+    // Called from getSourceSamples, thus in play thread rather than
+    // fill thread.  Return the number of frames written, which will
+    // be count or fewer.
+    size_t mixExampleModel(size_t count, float **buffers);
+
     // Called from getSourceSamples.
     void applyAuditioningEffect(size_t count, float **buffers);
 
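The header now declares queueExampleNote() in the public interface, together with the m_exampleNotes / m_examplePlaybackFrame state and the private mixExampleModel() helper. The caller is not part of this changeset; a hypothetical use from a MIDI-input or keyboard handler might look like the sketch below (auditionPitch is an invented name, and the include path is assumed relative to the SV source tree).

#include "audioio/AudioCallbackPlaySource.h"

// Hypothetical helper, not part of this changeset: audition a pitch by
// queuing a short example note on the shared play source.
void auditionPitch(AudioCallbackPlaySource *source, int midiPitch)
{
    if (!source) return;
    // The note is mixed into the next audio callback by mixExampleModel(),
    // even when no document models are currently playing.
    source->queueExampleNote(midiPitch);
}
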
--- a/audioio/AudioGenerator.cpp	Wed Jun 29 09:57:36 2011 +0100
+++ b/audioio/AudioGenerator.cpp	Mon Jul 25 17:57:59 2011 +0100
@@ -390,14 +390,14 @@
     SparseOneDimensionalModel *sodm = dynamic_cast<SparseOneDimensionalModel *>
 	(model);
     if (sodm) {
-	return mixSparseOneDimensionalModel(sodm, startFrame, frameCount,
-					    buffer, gain, pan, fadeIn, fadeOut);
+	return mixSparseModel(sodm, startFrame, frameCount,
+                              buffer, gain, pan, fadeIn, fadeOut);
     }
 
     NoteModel *nm = dynamic_cast<NoteModel *>(model);
     if (nm) {
-	return mixNoteModel(nm, startFrame, frameCount,
-			    buffer, gain, pan, fadeIn, fadeOut);
+	return mixSparseModel(nm, startFrame, frameCount,
+                              buffer, gain, pan, fadeIn, fadeOut);
     }
 
     return frameCount;
@@ -498,15 +498,93 @@
     return got;
 }
   
+AudioGenerator::Notes
+AudioGenerator::getNotesFromModel(Model *model,
+                                  size_t startFrame,
+                                  size_t frameCount,
+                                  size_t latency)
+{
+    Notes notes;
+
+    Note n;
+    n.pitch = 64;
+    n.duration = 0;
+    n.velocity = 100;
+
+    SparseOneDimensionalModel *sodm =
+        qobject_cast<SparseOneDimensionalModel *>(model);
+    
+    if (sodm) {
+
+	SparseOneDimensionalModel::PointList points =
+	    sodm->getPoints(startFrame + latency,
+                            startFrame + frameCount + latency);
+        
+	for (SparseOneDimensionalModel::PointList::iterator pli =
+		 points.begin(); pli != points.end(); ++pli) {
+            size_t frame = pli->frame;
+            if (frame > latency) frame -= latency;
+            if (frame < startFrame || frame >= startFrame + frameCount) {
+                continue;
+            }
+            n.frame = frame;
+            notes.push_back(n);
+        }
+    }
+
+    NoteModel *nm =
+        qobject_cast<NoteModel *>(model);
+
+    if (nm) {
+
+	NoteModel::PointList points =
+	    nm->getPoints(startFrame + latency,
+                          startFrame + frameCount + latency);
+        
+	for (NoteModel::PointList::iterator pli =
+		 points.begin(); pli != points.end(); ++pli) {
+
+            size_t frame = pli->frame;
+            if (frame > latency) frame -= latency;
+            if (frame < startFrame || frame >= startFrame + frameCount) {
+                continue;
+            }
+
+            n.frame = frame;
+            n.duration = pli->duration;
+            if (n.duration == 1) n.duration = m_sourceSampleRate / 20;
+
+            if (nm->getScaleUnits() == "Hz") {
+                n.pitch = Pitch::getPitchForFrequency(pli->value);
+            } else {
+                n.pitch = lrintf(pli->value);
+            }
+
+            if (pli->level > 0.f && pli->level <= 1.f) {
+                n.velocity = lrintf(pli->level * 127);
+            }
+
+            notes.push_back(n);
+        }
+    }
+
+    return notes;
+}
+    
 size_t
-AudioGenerator::mixSparseOneDimensionalModel(SparseOneDimensionalModel *sodm,
-					     size_t startFrame, size_t frames,
-					     float **buffer, float gain, float pan,
-					     size_t /* fadeIn */,
-					     size_t /* fadeOut */)
+AudioGenerator::mixSparseModel(Model *model,
+                               size_t startFrame, size_t frames,
+                               float **buffer, float gain, float pan,
+                               size_t /* fadeIn */,
+                               size_t /* fadeOut */)
 {
-    RealTimePluginInstance *plugin = m_synthMap[sodm];
-    if (!plugin) return 0;
+    RealTimePluginInstance *plugin = m_synthMap[model];
+    if (!plugin) {
+        SVDEBUG << "AudioGenerator::mixSparseModel: No plugin" << endl;
+        return 0;
+    }
+
+    SVDEBUG << "AudioGenerator::mixSparseModel: Have plugin" << endl;
 
     size_t latency = plugin->getLatency();
     size_t blocks = frames / m_pluginBlockSize;
@@ -531,39 +609,32 @@
     snd_seq_event_t onEv;
     onEv.type = SND_SEQ_EVENT_NOTEON;
     onEv.data.note.channel = 0;
-    onEv.data.note.note = 64;
-    onEv.data.note.velocity = 100;
 
     snd_seq_event_t offEv;
     offEv.type = SND_SEQ_EVENT_NOTEOFF;
     offEv.data.note.channel = 0;
     offEv.data.note.velocity = 0;
     
-    NoteOffSet &noteOffs = m_noteOffs[sodm];
+    NoteOffSet &noteOffs = m_noteOffs[model];
+
+    int halfSecond = 0.5 * m_sourceSampleRate;
 
     for (size_t i = 0; i < blocks; ++i) {
 
 	size_t reqStart = startFrame + i * m_pluginBlockSize;
 
-	SparseOneDimensionalModel::PointList points =
-	    sodm->getPoints(reqStart + latency,
-			    reqStart + latency + m_pluginBlockSize);
-
         Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
 	    (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);
 
-	for (SparseOneDimensionalModel::PointList::iterator pli =
-		 points.begin(); pli != points.end(); ++pli) {
+        Notes notes = getNotesFromModel
+            (model, reqStart, m_pluginBlockSize, latency);
+        
+        for (Notes::const_iterator ni = notes.begin(); ni != notes.end(); ++ni) {
 
-	    size_t pliFrame = pli->frame;
-
-	    if (pliFrame >= latency) pliFrame -= latency;
-
-	    if (pliFrame < reqStart ||
-		pliFrame >= reqStart + m_pluginBlockSize) continue;
+            size_t frame = ni->frame;
 
 	    while (noteOffs.begin() != noteOffs.end() &&
-		   noteOffs.begin()->frame <= pliFrame) {
+		   noteOffs.begin()->frame <= frame) {
 
                 Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
 		    (noteOffs.begin()->frame, m_sourceSampleRate);
@@ -571,26 +642,31 @@
 		offEv.data.note.note = noteOffs.begin()->pitch;
 
 #ifdef DEBUG_AUDIO_GENERATOR
-		std::cerr << "mixModel [sparse]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << std::endl;
+		std::cerr << "mixModel: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << std::endl;
 #endif
 
 		plugin->sendEvent(eventTime, &offEv);
 		noteOffs.erase(noteOffs.begin());
-	    }
+            }
+             
+            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
+		(frame, m_sourceSampleRate);
 
-            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		(pliFrame, m_sourceSampleRate);
-	    
+            onEv.data.note.note = ni->pitch;
+            onEv.data.note.velocity = ni->velocity;
+
 	    plugin->sendEvent(eventTime, &onEv);
 
 #ifdef DEBUG_AUDIO_GENERATOR
-	    std::cout << "mixModel [sparse]: point at frame " << pliFrame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
+	    std::cerr << "mixModel: point at frame " << frame << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
 #endif
-	    
-	    size_t duration = 7000; // frames [for now]
+
+            size_t duration = ni->duration;
+            if (duration == 0) duration = halfSecond;
+
 	    NoteOff noff;
 	    noff.pitch = onEv.data.note.note;
-	    noff.frame = pliFrame + duration;
+	    noff.frame = frame + duration;
 	    noteOffs.insert(noff);
 	}
 
@@ -640,169 +716,3 @@
     return got;
 }
 
-    
-//!!! mucho duplication with above -- refactor
-size_t
-AudioGenerator::mixNoteModel(NoteModel *nm,
-			     size_t startFrame, size_t frames,
-			     float **buffer, float gain, float pan,
-			     size_t /* fadeIn */,
-			     size_t /* fadeOut */)
-{
-    RealTimePluginInstance *plugin = m_synthMap[nm];
-    if (!plugin) return 0;
-
-    size_t latency = plugin->getLatency();
-    size_t blocks = frames / m_pluginBlockSize;
-    
-    //!!! hang on -- the fact that the audio callback play source's
-    //buffer is a multiple of the plugin's buffer size doesn't mean
-    //that we always get called for a multiple of it here (because it
-    //also depends on the JACK block size).  how should we ensure that
-    //all models write the same amount in to the mix, and that we
-    //always have a multiple of the plugin buffer size?  I guess this
-    //class has to be queryable for the plugin buffer size & the
-    //callback play source has to use that as a multiple for all the
-    //calls to mixModel
-
-    size_t got = blocks * m_pluginBlockSize;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-    Vamp::RealTime startTime = Vamp::RealTime::frame2RealTime
-        (startFrame, m_sourceSampleRate);
-
-    std::cout << "mixModel [note]: frames " << frames << " from " << startFrame
-	      << " (time " << startTime << "), blocks " << blocks << std::endl;
-#endif
-
-    snd_seq_event_t onEv;
-    onEv.type = SND_SEQ_EVENT_NOTEON;
-    onEv.data.note.channel = 0;
-    onEv.data.note.note = 64;
-    onEv.data.note.velocity = 100;
-
-    snd_seq_event_t offEv;
-    offEv.type = SND_SEQ_EVENT_NOTEOFF;
-    offEv.data.note.channel = 0;
-    offEv.data.note.velocity = 0;
-    
-    NoteOffSet &noteOffs = m_noteOffs[nm];
-
-    for (size_t i = 0; i < blocks; ++i) {
-
-	size_t reqStart = startFrame + i * m_pluginBlockSize;
-
-	NoteModel::PointList points =
-	    nm->getPoints(reqStart + latency,
-			    reqStart + latency + m_pluginBlockSize);
-
-        Vamp::RealTime blockTime = Vamp::RealTime::frame2RealTime
-	    (startFrame + i * m_pluginBlockSize, m_sourceSampleRate);
-
-	for (NoteModel::PointList::iterator pli =
-		 points.begin(); pli != points.end(); ++pli) {
-
-	    size_t pliFrame = pli->frame;
-
-	    if (pliFrame >= latency) pliFrame -= latency;
-
-	    if (pliFrame < reqStart ||
-		pliFrame >= reqStart + m_pluginBlockSize) continue;
-
-	    while (noteOffs.begin() != noteOffs.end() &&
-		   noteOffs.begin()->frame <= pliFrame) {
-
-                Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		    (noteOffs.begin()->frame, m_sourceSampleRate);
-
-		offEv.data.note.note = noteOffs.begin()->pitch;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-		std::cerr << "mixModel [note]: sending note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << std::endl;
-#endif
-
-		plugin->sendEvent(eventTime, &offEv);
-		noteOffs.erase(noteOffs.begin());
-	    }
-
-            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		(pliFrame, m_sourceSampleRate);
-	    
-            if (nm->getScaleUnits() == "Hz") {
-                onEv.data.note.note = Pitch::getPitchForFrequency(pli->value);
-            } else {
-                onEv.data.note.note = lrintf(pli->value);
-            }
-
-            if (pli->level > 0.f && pli->level <= 1.f) {
-                onEv.data.note.velocity = lrintf(pli->level * 127);
-            } else {
-                onEv.data.note.velocity = 100;
-            }
-
-	    plugin->sendEvent(eventTime, &onEv);
-
-#ifdef DEBUG_AUDIO_GENERATOR
-	    std::cout << "mixModel [note]: point at frame " << pliFrame << ", pitch " << (int)onEv.data.note.note << ", block start " << (startFrame + i * m_pluginBlockSize) << ", resulting time " << eventTime << std::endl;
-#endif
-	    
-	    size_t duration = pli->duration;
-            if (duration == 0 || duration == 1) {
-                duration = m_sourceSampleRate / 20;
-            }
-	    NoteOff noff;
-	    noff.pitch = onEv.data.note.note;
-	    noff.frame = pliFrame + duration;
-	    noteOffs.insert(noff);
-
-#ifdef DEBUG_AUDIO_GENERATOR
-            std::cout << "mixModel [note]: recording note off at " << noff.frame << std::endl;
-#endif
-	}
-
-	while (noteOffs.begin() != noteOffs.end() &&
-	       noteOffs.begin()->frame <=
-	       startFrame + i * m_pluginBlockSize + m_pluginBlockSize) {
-
-            Vamp::RealTime eventTime = Vamp::RealTime::frame2RealTime
-		(noteOffs.begin()->frame, m_sourceSampleRate);
-
-	    offEv.data.note.note = noteOffs.begin()->pitch;
-
-#ifdef DEBUG_AUDIO_GENERATOR
-            std::cerr << "mixModel [note]: sending leftover note-off event at time " << eventTime << " frame " << noteOffs.begin()->frame << " pitch " << noteOffs.begin()->pitch << std::endl;
-#endif
-
-	    plugin->sendEvent(eventTime, &offEv);
-	    noteOffs.erase(noteOffs.begin());
-	}
-	
-	plugin->run(blockTime);
-	float **outs = plugin->getAudioOutputBuffers();
-
-	for (size_t c = 0; c < m_targetChannelCount; ++c) {
-#ifdef DEBUG_AUDIO_GENERATOR
-	    std::cout << "mixModel [note]: adding " << m_pluginBlockSize << " samples from plugin output " << c << std::endl;
-#endif
-
-	    size_t sourceChannel = (c % plugin->getAudioOutputCount());
-
-	    float channelGain = gain;
-	    if (pan != 0.0) {
-		if (c == 0) {
-		    if (pan > 0.0) channelGain *= 1.0 - pan;
-		} else {
-		    if (pan < 0.0) channelGain *= pan + 1.0;
-		}
-	    }
-
-	    for (size_t j = 0; j < m_pluginBlockSize; ++j) {
-		buffer[c][i * m_pluginBlockSize + j] += 
-		    channelGain * outs[sourceChannel][j];
-	    }
-	}
-    }
-
-    return got;
-}
-
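The AudioGenerator changes above fold the old mixSparseOneDimensionalModel() and mixNoteModel() into a single mixSparseModel(), with getNotesFromModel() converting either model type to a uniform Notes list: point frames are shifted back by the plugin latency and filtered to the requested block, "Hz" values are converted to MIDI pitches, and levels in (0, 1] become velocities. Below is a minimal standalone sketch of that conversion using hypothetical Point/Note types; pitchForFrequency() stands in for Pitch::getPitchForFrequency(), and durations of 0 or 1 are treated as a twentieth of a second as in the removed mixNoteModel().

// Standalone C++ sketch (hypothetical types, not the SV models):
// the windowing, latency compensation and unit conversion performed
// by getNotesFromModel().
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

struct Point { std::size_t frame; float value; float level; std::size_t duration; };
struct Note  { int pitch; std::size_t frame; std::size_t duration; int velocity; };

// Nearest MIDI pitch for a frequency in Hz (A4 = 440 Hz = 69);
// stands in for Pitch::getPitchForFrequency().
int pitchForFrequency(float hz) {
    return static_cast<int>(std::lround(69.0 + 12.0 * std::log2(hz / 440.0)));
}

// Points are requested one latency ahead of the block, then shifted back
// and filtered so only notes in [startFrame, startFrame + frameCount) remain.
std::vector<Note> notesInBlock(const std::vector<Point> &points,
                               std::size_t startFrame,
                               std::size_t frameCount,
                               std::size_t latency,
                               bool valuesAreHz,
                               std::size_t sampleRate)
{
    std::vector<Note> notes;
    for (const Point &p : points) {
        std::size_t frame = p.frame;
        if (frame > latency) frame -= latency;
        if (frame < startFrame || frame >= startFrame + frameCount) continue;

        Note n;
        n.frame = frame;
        n.duration = (p.duration <= 1) ? sampleRate / 20 : p.duration;
        n.pitch = valuesAreHz ? pitchForFrequency(p.value)
                              : static_cast<int>(std::lround(p.value));
        n.velocity = (p.level > 0.f && p.level <= 1.f)
                         ? static_cast<int>(std::lround(p.level * 127)) : 100;
        notes.push_back(n);
    }
    return notes;
}

int main() {
    std::vector<Point> pts = { { 2048 + 256, 440.f, 0.5f, 4410 } };
    std::vector<Note> ns = notesInBlock(pts, 2048, 1024, 256, true, 44100);
    std::cout << ns.size() << " " << ns[0].pitch << " " << ns[0].velocity << "\n";
    // prints "1 69 64": 440 Hz maps to MIDI 69, level 0.5 to velocity 64
    return 0;
}
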
--- a/audioio/AudioGenerator.h	Wed Jun 29 09:57:36 2011 +0100
+++ b/audioio/AudioGenerator.h	Mon Jul 25 17:57:59 2011 +0100
@@ -28,6 +28,7 @@
 
 #include <set>
 #include <map>
+#include <vector>
 
 class AudioGenerator : public QObject
 {
@@ -101,6 +102,14 @@
     bool m_soloing;
     std::set<Model *> m_soloModelSet;
 
+    struct Note {
+        int pitch;
+        size_t frame;
+        size_t duration; // 0 -> "anything" (short example note)
+        int velocity;
+    };
+    typedef std::vector<Note> Notes;
+
     struct NoteOff {
 
 	int pitch;
@@ -128,16 +137,16 @@
     static void initialiseSampleDir();
     static void setSampleDir(RealTimePluginInstance *plugin);
 
+    virtual Notes getNotesFromModel
+    (Model *model, size_t startFrame, size_t frameCount, 
+     size_t latency);
+
     virtual size_t mixDenseTimeValueModel
     (DenseTimeValueModel *model, size_t startFrame, size_t frameCount,
      float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
 
-    virtual size_t mixSparseOneDimensionalModel
-    (SparseOneDimensionalModel *model, size_t startFrame, size_t frameCount,
-     float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
-
-    virtual size_t mixNoteModel
-    (NoteModel *model, size_t startFrame, size_t frameCount,
+    virtual size_t mixSparseModel
+    (Model *model, size_t startFrame, size_t frameCount,
      float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut);
 
     static const size_t m_pluginBlockSize;
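
The header diff above adds the Note struct and Notes typedef backing getNotesFromModel(), alongside the existing NoteOff / NoteOffSet bookkeeping that mixSparseModel() relies on: each note-on schedules a note-off at frame + duration, and pending note-offs are flushed before any later note-on and again at the end of each plugin block. A small standalone sketch of that scheduling follows, with hypothetical names (flushNoteOffs() is invented, and events are printed rather than sent to a synth plugin).

// Standalone C++ sketch (hypothetical names): the ordered note-off
// scheduling used by mixSparseModel().
#include <cstddef>
#include <iostream>
#include <set>

struct NoteOff {
    int pitch;
    std::size_t frame;
    // order the set by frame so the earliest pending note-off comes first
    bool operator<(const NoteOff &other) const { return frame < other.frame; }
};

typedef std::multiset<NoteOff> NoteOffSet;

// Emit (here: print) every pending note-off due at or before the given frame.
void flushNoteOffs(NoteOffSet &offs, std::size_t frame) {
    while (!offs.empty() && offs.begin()->frame <= frame) {
        std::cout << "note-off pitch " << offs.begin()->pitch
                  << " at frame " << offs.begin()->frame << "\n";
        offs.erase(offs.begin());
    }
}

int main() {
    NoteOffSet offs;
    const std::size_t blockEnd = 1024;

    // Note-on at frame 100 lasting 500 frames: schedule its note-off.
    flushNoteOffs(offs, 100);            // nothing pending yet
    std::cout << "note-on pitch 60 at frame 100\n";
    offs.insert({ 60, 100 + 500 });

    // A later note-on at frame 800 first flushes the earlier note-off.
    flushNoteOffs(offs, 800);            // emits the pitch-60 off at 600
    std::cout << "note-on pitch 64 at frame 800\n";
    offs.insert({ 64, 800 + 500 });

    // End of the block: flush anything due before the block boundary.
    flushNoteOffs(offs, blockEnd);       // nothing, 1300 > 1024
    return 0;
}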