changeset 184:ebd906049fb6

* Change WaveFileModel API from getValues(start,end) to getData(start,count). It's much less error-prone to pass in frame counts instead of start/end locations. Should have done this ages ago. This closes #1794563.
* Add option to apply a transform to only the selection region, instead of the whole audio.
* (to make the above work properly) Add start frame offset to wave models
author Chris Cannam
date Mon, 01 Oct 2007 13:48:38 +0000
parents 3fdaf3157eea
children f3191ab6d564
files audioio/AudioGenerator.cpp document/Document.cpp document/SVFileReader.cpp main/MainWindow.cpp transform/FeatureExtractionPluginTransform.cpp transform/RealTimePluginTransform.cpp transform/TransformFactory.cpp transform/TransformFactory.h
diffstat 8 files changed, 107 insertions(+), 38 deletions(-)
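For reference, the getValues -> getData change described in the message above, as a minimal standalone sketch. "Model" here is an illustrative stand-in, not the real WaveFileModel/DenseTimeValueModel declarations; only the shape of the two calls is taken from this changeset.

#include <cstddef>

struct Model {
    // Old API (removed in this changeset): start and end frame positions.
    virtual size_t getValues(int channel, size_t start, size_t end,
                             float *buffer) = 0;
    // New API: start frame plus frame count.
    virtual size_t getData(int channel, size_t start, size_t count,
                           float *buffer) = 0;
    virtual ~Model() { }
};

size_t readBlock(Model *m, int channel, size_t startFrame, size_t frames,
                 float *buffer)
{
    // Before: the exclusive end frame had to be recomputed at every call
    // site, which invited off-by-one and swapped-argument mistakes:
    //     m->getValues(channel, startFrame, startFrame + frames, buffer);

    // After: pass the frame count the caller already has to hand.
    return m->getData(channel, startFrame, frames, buffer);
}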
--- a/audioio/AudioGenerator.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/audioio/AudioGenerator.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -358,8 +358,6 @@
 {
     QMutexLocker locker(&m_mutex);
 
-    std::cerr << "setting solo set" << std::endl;
-
     m_soloModelSet = s;
     m_soloing = true;
 }
@@ -467,15 +465,17 @@
 
 	if (prevChannel != sourceChannel) {
 	    if (startFrame >= fadeIn/2) {
-		got = dtvm->getValues
+		got = dtvm->getData
 		    (sourceChannel,
-		     startFrame - fadeIn/2, startFrame + frames + fadeOut/2,
+		     startFrame - fadeIn/2,
+                     frames + fadeOut/2 + fadeIn/2,
 		     channelBuffer);
 	    } else {
 		size_t missing = fadeIn/2 - startFrame;
-		got = dtvm->getValues
+		got = dtvm->getData
 		    (sourceChannel,
-		     0, startFrame + frames + fadeOut/2,
+		     startFrame,
+                     frames + fadeOut/2,
 		     channelBuffer + missing);
 	    }	    
 	}
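The AudioGenerator conversion above is mechanical for the first branch: the new count argument is simply end minus start, i.e. (startFrame + frames + fadeOut/2) - (startFrame - fadeIn/2) = frames + fadeOut/2 + fadeIn/2, which is exactly what the replacement lines pass to getData.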
--- a/document/Document.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/document/Document.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -813,17 +813,24 @@
         }
 
 	if (haveDerivation) {
+
+            QString extentsAttributes;
+            if (rec.context.startFrame != 0 ||
+                rec.context.duration != 0) {
+                extentsAttributes = QString("startFrame=\"%1\" duration=\"%2\" ")
+                    .arg(rec.context.startFrame)
+                    .arg(rec.context.duration);
+            }
 	    
-            //!!! stream the rest of the execution context in both directions (i.e. not just channel)
-
 	    out << indent;
-	    out << QString("  <derivation source=\"%1\" model=\"%2\" channel=\"%3\" domain=\"%4\" stepSize=\"%5\" blockSize=\"%6\" windowType=\"%7\" transform=\"%8\"")
+	    out << QString("  <derivation source=\"%1\" model=\"%2\" channel=\"%3\" domain=\"%4\" stepSize=\"%5\" blockSize=\"%6\" %7windowType=\"%8\" transform=\"%9\"")
 		.arg(XmlExportable::getObjectExportId(rec.source))
 		.arg(XmlExportable::getObjectExportId(i->first))
                 .arg(rec.context.channel)
                 .arg(rec.context.domain)
                 .arg(rec.context.stepSize)
                 .arg(rec.context.blockSize)
+                .arg(extentsAttributes)
                 .arg(int(rec.context.windowType))
 		.arg(XmlExportable::encodeEntities(rec.transform));
 
--- a/document/SVFileReader.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/document/SVFileReader.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -1013,6 +1013,24 @@
     int windowType = attributes.value("windowType").trimmed().toInt(&ok);
     if (ok) m_currentTransformContext.windowType = WindowType(windowType);
 
+    QString startFrameStr = attributes.value("startFrame");
+    QString durationStr = attributes.value("duration");
+
+    size_t startFrame = 0;
+    size_t duration = 0;
+
+    if (startFrameStr != "") {
+        startFrame = startFrameStr.trimmed().toInt(&ok);
+        if (!ok) startFrame = 0;
+    }
+    if (durationStr != "") {
+        duration = durationStr.trimmed().toInt(&ok);
+        if (!ok) duration = 0;
+    }
+
+    m_currentTransformContext.startFrame = startFrame;
+    m_currentTransformContext.duration = duration;
+
     return true;
 }
 
--- a/main/MainWindow.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/main/MainWindow.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -2022,7 +2022,9 @@
 	m_viewManager->setPlaySoloMode(!m_viewManager->getPlaySoloMode());
     }
 
-    if (!m_viewManager->getPlaySoloMode()) {
+    if (m_viewManager->getPlaySoloMode()) {
+        currentPaneChanged(m_paneStack->getCurrentPane());
+    } else {
         m_viewManager->setPlaybackModel(0);
         if (m_playSource) {
             m_playSource->clearSoloModelSet();
@@ -4216,11 +4218,19 @@
     std::vector<Model *> candidateInputModels =
         m_document->getTransformInputModels();
 
+    size_t startFrame = 0, duration = 0;
+    size_t endFrame = 0;
+    m_viewManager->getSelection().getExtents(startFrame, endFrame);
+    if (endFrame > startFrame) duration = endFrame - startFrame;
+    else startFrame = 0;
+
     Model *inputModel = factory->getConfigurationForTransform(transform,
                                                               candidateInputModels,
                                                               context,
                                                               configurationXml,
-                                                              m_playSource);
+                                                              m_playSource,
+                                                              startFrame,
+                                                              duration);
     if (!inputModel) return;
 
 //    std::cerr << "MainWindow::addLayer: Input model is " << inputModel << " \"" << inputModel->objectName().toStdString() << "\"" << std::endl;
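The MainWindow hunk above reduces the current selection to a (startFrame, duration) pair, with (0, 0) meaning "apply to the whole model". A self-contained sketch of that conversion, using a hypothetical Selection struct rather than the real ViewManager selection type:

#include <cstddef>
#include <utility>

struct Selection {        // hypothetical stand-in for the real selection type
    size_t start;
    size_t end;           // exclusive end frame; end <= start means "empty"
};

// Returns (startFrame, duration); (0, 0) means "apply to the whole model".
std::pair<size_t, size_t> selectionExtents(const Selection &s)
{
    size_t startFrame = s.start, duration = 0;
    if (s.end > s.start) duration = s.end - s.start;
    else startFrame = 0;  // no usable selection
    return std::make_pair(startFrame, duration);
}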
--- a/transform/FeatureExtractionPluginTransform.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/transform/FeatureExtractionPluginTransform.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -405,9 +405,9 @@
         startFrame = 0;
     }
 
-    long got = getInput()->getValues
+    long got = getInput()->getData
         ((channelCount == 1 ? m_context.channel : channel),
-         startFrame, startFrame + size, buffer + offset);
+         startFrame, size, buffer + offset);
 
     while (got < size) {
         buffer[offset + got] = 0.0;
--- a/transform/RealTimePluginTransform.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/transform/RealTimePluginTransform.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -138,51 +138,69 @@
     size_t channelCount = input->getChannelCount();
     if (!wwfm && m_context.channel != -1) channelCount = 1;
 
-    size_t blockSize = m_plugin->getBufferSize();
+    long blockSize = m_plugin->getBufferSize();
 
     float **inbufs = m_plugin->getAudioInputBuffers();
 
-    size_t startFrame = m_input->getStartFrame();
-    size_t   endFrame = m_input->getEndFrame();
-    size_t blockFrame = startFrame;
+    long startFrame = m_input->getStartFrame();
+    long   endFrame = m_input->getEndFrame();
+    
+    long contextStart = m_context.startFrame;
+    long contextDuration = m_context.duration;
 
-    size_t prevCompletion = 0;
+    if (contextStart == 0 || contextStart < startFrame) {
+        contextStart = startFrame;
+    }
 
-    size_t latency = m_plugin->getLatency();
+    if (contextDuration == 0) {
+        contextDuration = endFrame - contextStart;
+    }
+    if (contextStart + contextDuration > endFrame) {
+        contextDuration = endFrame - contextStart;
+    }
 
-    while (blockFrame < endFrame + latency && !m_abandoned) {
+    wwfm->setStartFrame(contextStart);
 
-	size_t completion =
-	    (((blockFrame - startFrame) / blockSize) * 99) /
-	    (   (endFrame - startFrame) / blockSize);
+    long blockFrame = contextStart;
 
-	size_t got = 0;
+    long prevCompletion = 0;
+
+    long latency = m_plugin->getLatency();
+
+    while (blockFrame < contextStart + contextDuration + latency &&
+           !m_abandoned) {
+
+	long completion =
+	    (((blockFrame - contextStart) / blockSize) * 99) /
+	    ((contextDuration) / blockSize);
+
+	long got = 0;
 
 	if (channelCount == 1) {
             if (inbufs && inbufs[0]) {
-                got = input->getValues
-                    (m_context.channel, blockFrame, blockFrame + blockSize, inbufs[0]);
+                got = input->getData
+                    (m_context.channel, blockFrame, blockSize, inbufs[0]);
                 while (got < blockSize) {
                     inbufs[0][got++] = 0.0;
                 }          
             }
             for (size_t ch = 1; ch < m_plugin->getAudioInputCount(); ++ch) {
-                for (size_t i = 0; i < blockSize; ++i) {
+                for (long i = 0; i < blockSize; ++i) {
                     inbufs[ch][i] = inbufs[0][i];
                 }
             }
 	} else {
 	    for (size_t ch = 0; ch < channelCount; ++ch) {
                 if (inbufs && inbufs[ch]) {
-                    got = input->getValues
-                        (ch, blockFrame, blockFrame + blockSize, inbufs[ch]);
+                    got = input->getData
+                        (ch, blockFrame, blockSize, inbufs[ch]);
                     while (got < blockSize) {
                         inbufs[ch][got++] = 0.0;
                     }
                 }
 	    }
             for (size_t ch = channelCount; ch < m_plugin->getAudioInputCount(); ++ch) {
-                for (size_t i = 0; i < blockSize; ++i) {
+                for (long i = 0; i < blockSize; ++i) {
                     inbufs[ch][i] = inbufs[ch % channelCount][i];
                 }
             }
@@ -208,7 +226,7 @@
 
             float value = m_plugin->getControlOutputValue(m_outputNo);
 
-            size_t pointFrame = blockFrame;
+            long pointFrame = blockFrame;
             if (pointFrame > latency) pointFrame -= latency;
             else pointFrame = 0;
 
@@ -222,12 +240,13 @@
             if (outbufs) {
 
                 if (blockFrame >= latency) {
-                    size_t writeSize = std::min(blockSize,
-                                                endFrame + latency - blockFrame);
+                    long writeSize = std::min
+                        (blockSize,
+                         contextStart + contextDuration + latency - blockFrame);
                     wwfm->addSamples(outbufs, writeSize);
                 } else if (blockFrame + blockSize >= latency) {
-                    size_t offset = latency - blockFrame;
-                    size_t count = blockSize - offset;
+                    long offset = latency - blockFrame;
+                    long count = blockSize - offset;
                     float **tmp = new float *[channelCount];
                     for (size_t c = 0; c < channelCount; ++c) {
                         tmp[c] = outbufs[c] + offset;
@@ -238,7 +257,7 @@
             }
         }
 
-	if (blockFrame == startFrame || completion > prevCompletion) {
+	if (blockFrame == contextStart || completion > prevCompletion) {
 	    if (stvm) stvm->setCompletion(completion);
 	    if (wwfm) wwfm->setCompletion(completion);
 	    prevCompletion = completion;
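The RealTimePluginTransform changes above clamp the requested selection to the input model's extent before processing starts. The clamping rule in isolation, as a sketch (the function name and free-standing form are illustrative; in the changeset this logic is written inline ahead of the processing loop):

#include <utility>

// (contextStart, contextDuration) is the requested selection;
// (startFrame, endFrame) is the input model's extent.
std::pair<long, long> clampToModel(long contextStart, long contextDuration,
                                   long startFrame, long endFrame)
{
    // A zero start, or a start before the model begins, falls back to the
    // model's own start frame.
    if (contextStart == 0 || contextStart < startFrame) {
        contextStart = startFrame;
    }
    // A zero duration means "to the end of the model", and the selection
    // must not run past the model's end.
    if (contextDuration == 0 ||
        contextStart + contextDuration > endFrame) {
        contextDuration = endFrame - contextStart;
    }
    return std::make_pair(contextStart, contextDuration);
}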
--- a/transform/TransformFactory.cpp	Sat Sep 29 10:58:31 2007 +0000
+++ b/transform/TransformFactory.cpp	Mon Oct 01 13:48:38 2007 +0000
@@ -516,7 +516,9 @@
                                                const std::vector<Model *> &candidateInputModels,
                                                PluginTransform::ExecutionContext &context,
                                                QString &configurationXml,
-                                               AudioCallbackPlaySource *source)
+                                               AudioCallbackPlaySource *source,
+                                               size_t startFrame,
+                                               size_t duration)
 {
     if (candidateInputModels.empty()) return 0;
 
@@ -651,6 +653,10 @@
             dialog->setCandidateInputModels(candidateModelNames);
         }
 
+        if (startFrame != 0 || duration != 0) {
+            dialog->setShowSelectionOnlyOption(true);
+        }
+
         if (targetChannels > 0) {
             dialog->setChannelArrangement(sourceChannels, targetChannels,
                                           defaultChannel);
@@ -678,6 +684,13 @@
 
         configurationXml = PluginXml(plugin).toXmlString();
         context.channel = dialog->getChannel();
+        
+        if (startFrame != 0 || duration != 0) {
+            if (dialog->getSelectionOnly()) {
+                context.startFrame = startFrame;
+                context.duration = duration;
+            }
+        }
 
         dialog->getProcessingParameters(context.stepSize,
                                         context.blockSize,
--- a/transform/TransformFactory.h	Sat Sep 29 10:58:31 2007 +0000
+++ b/transform/TransformFactory.h	Mon Oct 01 13:48:38 2007 +0000
@@ -89,7 +89,9 @@
                                         const std::vector<Model *> &candidateInputModels,
                                         PluginTransform::ExecutionContext &context,
                                         QString &configurationXml,
-                                        AudioCallbackPlaySource *source = 0);
+                                        AudioCallbackPlaySource *source = 0,
+                                        size_t startFrame = 0,
+                                        size_t duration = 0);
 
     /**
      * Get the default execution context for the given transform