changeset 106:d94ee3e8dfe1

modify the way the RingBuffer is read to let the TimeStretchFilter be the master
author lbajardsilogic
date Wed, 12 Sep 2007 08:47:36 +0000
parents 490e955a21f8
children c3ac34b2e45b
files sv/audioio/AudioCallbackPlaySource.cpp sv/filter/FilterStack.cpp sv/filter/FilterStack.h sv/filter/TimeStretchFilter.cpp sv/filter/TimeStretchFilter.h
diffstat 5 files changed, 425 insertions(+), 312 deletions(-) [+]
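
Editor's note (not part of the changeset): the commit reworks getSourceSamples()/applyRealTimeFilters() so that the filter stack, rather than the caller, dictates how the ring buffers are consumed — the caller peeks getRequiredInputSamples() samples without moving the read pointer, feeds them to the stack with putInput()/getOutput(), and then advances the read pointer by exactly getRequiredSkipSamples(), which the stack delegates to the TimeStretchFilter. The sketch below illustrates that control flow only; the RingBuffer and PassThroughFilter classes here are simplified, single-channel stand-ins invented for illustration, not the project's own RingBuffer<float> or FilterStack, and the constants (64-sample look-ahead, 32-sample skip) are arbitrary.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal single-channel ring-buffer stand-in with peek/skip semantics,
// mirroring the peek()/skip() calls used in the new applyRealTimeFilters().
class RingBuffer {
public:
    explicit RingBuffer(std::vector<float> data) : m_data(std::move(data)) {}

    // Copy up to n samples without moving the read pointer.
    size_t peek(float *out, size_t n) const {
        size_t avail = std::min(n, m_data.size() - m_readPos);
        std::copy_n(m_data.begin() + m_readPos, avail, out);
        return avail;
    }

    // Advance the read pointer by up to n samples.
    size_t skip(size_t n) {
        size_t avail = std::min(n, m_data.size() - m_readPos);
        m_readPos += avail;
        return avail;
    }

private:
    std::vector<float> m_data;
    size_t m_readPos = 0;
};

// Filter stand-in: it is the "master" -- it reports how many input samples it
// needs per output block and how far the reader should advance afterwards.
class PassThroughFilter {
public:
    size_t getRequiredInputSamples(size_t outputNeeded) const { return outputNeeded + 64; }
    size_t getRequiredSkipSamples() const { return 32; }  // e.g. one analysis hop
    void putInput(const float *in, size_t n) { m_held.assign(in, in + n); }
    void getOutput(float *out, size_t n) {
        std::copy_n(m_held.begin(), std::min(n, m_held.size()), out);
    }
private:
    std::vector<float> m_held;
};

int main() {
    RingBuffer rb(std::vector<float>(4096, 0.5f));
    PassThroughFilter filter;

    const size_t count = 256;                 // samples the audio callback wants
    std::vector<float> out(count, 0.0f);

    size_t required = filter.getRequiredInputSamples(count);
    std::vector<float> in(required, 0.0f);

    // Peek (do not consume) the input the filter asks for...
    size_t got = rb.peek(in.data(), required);
    if (got < required) {
        std::cerr << "read underrun: " << got << " < " << required << "\n";
        return 1;
    }
    filter.putInput(in.data(), required);
    filter.getOutput(out.data(), count);

    // ...then advance the read pointer only by the amount the filter dictates.
    rb.skip(filter.getRequiredSkipSamples());
    std::cout << "produced " << count << " samples, skipped "
              << filter.getRequiredSkipSamples() << " input samples\n";
    return 0;
}
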
--- a/sv/audioio/AudioCallbackPlaySource.cpp	Tue Sep 04 07:54:09 2007 +0000
+++ b/sv/audioio/AudioCallbackPlaySource.cpp	Wed Sep 12 08:47:36 2007 +0000
@@ -726,7 +726,7 @@
         existingStretcher->getRatio() == factor &&
         existingStretcher->getSharpening() == sharpen &&
         existingStretcher->getChannelCount() == channels) {
-	return;
+		return;
     }
 
     if (factor != 1) {
@@ -734,25 +734,25 @@
         if (existingStretcher &&
             existingStretcher->getSharpening() == sharpen &&
             existingStretcher->getChannelCount() == channels) {
-            existingStretcher->setRatio(factor);
-            return;
+			existingStretcher->setRatio(factor);
+			return;
         }
 
-	PhaseVocoderTimeStretcher *newStretcher = new PhaseVocoderTimeStretcher
+		PhaseVocoderTimeStretcher *newStretcher = new PhaseVocoderTimeStretcher
 	    (getTargetSampleRate(),
              channels,
              factor,
              sharpen,
              getTargetBlockSize());
 
-	m_timeStretcher = newStretcher;
+		m_timeStretcher = newStretcher;
 
     } else {
-	m_timeStretcher = 0;
+		m_timeStretcher = 0;
     }
 
     if (existingStretcher) {
-	m_timeStretcherScavenger.claim(existingStretcher);
+		m_timeStretcherScavenger.claim(existingStretcher);
     }
 }
 
@@ -797,7 +797,7 @@
 
     if (count == 0) return 0;
 
-    PhaseVocoderTimeStretcher *ts = m_timeStretcher;
+/*    PhaseVocoderTimeStretcher *ts = m_timeStretcher;
 
     if (!ts || ts->getRatio() == 1) {
 
@@ -923,8 +923,12 @@
 
     applyAuditioningEffect(count, buffer);
 
+	*/
+
 	applyRealTimeFilters(count, buffer);
 
+	applyAuditioningEffect(count, buffer);
+
     m_condition.wakeAll();
 
     return count;
@@ -983,11 +987,11 @@
 
     size_t space = 0;
     for (size_t c = 0; c < getTargetChannelCount(); ++c) {
-	RingBuffer<float> *wb = getWriteRingBuffer(c);
-	if (wb) {
-	    size_t spaceHere = wb->getWriteSpace();
-	    if (c == 0 || spaceHere < space) space = spaceHere;
-	}
+		RingBuffer<float> *wb = getWriteRingBuffer(c);
+		if (wb) {
+			size_t spaceHere = wb->getWriteSpace();
+			if (c == 0 || spaceHere < space) space = spaceHere;
+		}
     }
     
     if (space == 0) return false;
@@ -1019,164 +1023,164 @@
     static size_t bufferPtrCount = 0;
 
     if (bufferPtrCount < channels) {
-	if (bufferPtrs) delete[] bufferPtrs;
-	bufferPtrs = new float *[channels];
-	bufferPtrCount = channels;
+		if (bufferPtrs) delete[] bufferPtrs;
+		bufferPtrs = new float *[channels];
+		bufferPtrCount = channels;
     }
 
     size_t generatorBlockSize = m_audioGenerator->getBlockSize();
 
     if (resample && !m_converter) {
-	static bool warned = false;
-	if (!warned) {
-	    std::cerr << "WARNING: sample rates differ, but no converter available!" << std::endl;
-	    warned = true;
-	}
+		static bool warned = false;
+		if (!warned) {
+			std::cerr << "WARNING: sample rates differ, but no converter available!" << std::endl;
+			warned = true;
+		}
     }
 
     if (resample && m_converter) {
 
-	double ratio =
-	    double(getTargetSampleRate()) / double(getSourceSampleRate());
-	orig = size_t(orig / ratio + 0.1);
+		double ratio =
+			double(getTargetSampleRate()) / double(getSourceSampleRate());
+		orig = size_t(orig / ratio + 0.1);
 
-	// orig must be a multiple of generatorBlockSize
-	orig = (orig / generatorBlockSize) * generatorBlockSize;
-	if (orig == 0) return false;
+		// orig must be a multiple of generatorBlockSize
+		orig = (orig / generatorBlockSize) * generatorBlockSize;
+		if (orig == 0) return false;
 
-	size_t work = max(orig, space);
+		size_t work = max(orig, space);
 
-	// We only allocate one buffer, but we use it in two halves.
-	// We place the non-interleaved values in the second half of
-	// the buffer (orig samples for channel 0, orig samples for
-	// channel 1 etc), and then interleave them into the first
-	// half of the buffer.  Then we resample back into the second
-	// half (interleaved) and de-interleave the results back to
-	// the start of the buffer for insertion into the ringbuffers.
-	// What a faff -- especially as we've already de-interleaved
-	// the audio data from the source file elsewhere before we
-	// even reach this point.
-	
-	if (tmpSize < channels * work * 2) {
-	    delete[] tmp;
-	    tmp = new float[channels * work * 2];
-	    tmpSize = channels * work * 2;
-	}
+		// We only allocate one buffer, but we use it in two halves.
+		// We place the non-interleaved values in the second half of
+		// the buffer (orig samples for channel 0, orig samples for
+		// channel 1 etc), and then interleave them into the first
+		// half of the buffer.  Then we resample back into the second
+		// half (interleaved) and de-interleave the results back to
+		// the start of the buffer for insertion into the ringbuffers.
+		// What a faff -- especially as we've already de-interleaved
+		// the audio data from the source file elsewhere before we
+		// even reach this point.
+		
+		if (tmpSize < channels * work * 2) {
+			delete[] tmp;
+			tmp = new float[channels * work * 2];
+			tmpSize = channels * work * 2;
+		}
 
-	float *nonintlv = tmp + channels * work;
-	float *intlv = tmp;
-	float *srcout = tmp + channels * work;
-	
-	for (size_t c = 0; c < channels; ++c) {
-	    for (size_t i = 0; i < orig; ++i) {
-		nonintlv[channels * i + c] = 0.0f;
-	    }
-	}
+		float *nonintlv = tmp + channels * work;
+		float *intlv = tmp;
+		float *srcout = tmp + channels * work;
+		
+		for (size_t c = 0; c < channels; ++c) {
+			for (size_t i = 0; i < orig; ++i) {
+				nonintlv[channels * i + c] = 0.0f;
+			}
+		}
 
-	for (size_t c = 0; c < channels; ++c) {
-	    bufferPtrs[c] = nonintlv + c * orig;
-	}
+		for (size_t c = 0; c < channels; ++c) {
+			bufferPtrs[c] = nonintlv + c * orig;
+		}
 
-	got = mixModels(f, orig, bufferPtrs);
+		got = mixModels(f, orig, bufferPtrs);
 
-	// and interleave into first half
-	for (size_t c = 0; c < channels; ++c) {
-	    for (size_t i = 0; i < got; ++i) {
-		float sample = nonintlv[c * got + i];
-		intlv[channels * i + c] = sample;
-	    }
-	}
+		// and interleave into first half
+		for (size_t c = 0; c < channels; ++c) {
+			for (size_t i = 0; i < got; ++i) {
+				float sample = nonintlv[c * got + i];
+				intlv[channels * i + c] = sample;
+			}
+		}
+			
+		SRC_DATA data;
+		data.data_in = intlv;
+		data.data_out = srcout;
+		data.input_frames = got;
+		data.output_frames = work;
+		data.src_ratio = ratio;
+		data.end_of_input = 0;
 		
-	SRC_DATA data;
-	data.data_in = intlv;
-	data.data_out = srcout;
-	data.input_frames = got;
-	data.output_frames = work;
-	data.src_ratio = ratio;
-	data.end_of_input = 0;
-	
-	int err = 0;
+		int err = 0;
 
-        if (m_timeStretcher && m_timeStretcher->getRatio() < 0.4) {
+		if (m_timeStretcher && m_timeStretcher->getRatio() < 0.4) {
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-            std::cout << "Using crappy converter" << std::endl;
+			std::cout << "Using crappy converter" << std::endl;
 #endif
-            src_process(m_crapConverter, &data);
-        } else {
-            src_process(m_converter, &data);
-        }
+			src_process(m_crapConverter, &data);
+		} else {
+			src_process(m_converter, &data);
+		}
 
-	size_t toCopy = size_t(got * ratio + 0.1);
+		size_t toCopy = size_t(got * ratio + 0.1);
 
-	if (err) {
-	    std::cerr
-		<< "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
-		<< src_strerror(err) << std::endl;
-	    //!!! Then what?
+		if (err) {
+			std::cerr
+			<< "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
+			<< src_strerror(err) << std::endl;
+			//!!! Then what?
+		} else {
+			got = data.input_frames_used;
+			toCopy = data.output_frames_gen;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+			std::cout << "Resampled " << got << " frames to " << toCopy << " frames" << std::endl;
+#endif
+		}
+		
+		for (size_t c = 0; c < channels; ++c) {
+			for (size_t i = 0; i < toCopy; ++i) {
+				tmp[i] = srcout[channels * i + c];
+			}
+			RingBuffer<float> *wb = getWriteRingBuffer(c);
+			if (wb) wb->write(tmp, toCopy);
+		}
+
+		m_writeBufferFill = f;
+		if (readWriteEqual) m_readBufferFill = f;
+
 	} else {
-	    got = data.input_frames_used;
-	    toCopy = data.output_frames_gen;
+
+		// space must be a multiple of generatorBlockSize
+		space = (space / generatorBlockSize) * generatorBlockSize;
+		if (space == 0) return false;
+
+		if (tmpSize < channels * space) {
+			delete[] tmp;
+			tmp = new float[channels * space];
+			tmpSize = channels * space;
+		}
+
+		for (size_t c = 0; c < channels; ++c) {
+
+			bufferPtrs[c] = tmp + c * space;
+		    
+			for (size_t i = 0; i < space; ++i) {
+				tmp[c * space + i] = 0.0f;
+			}
+		}
+
+		size_t got = mixModels(f, space, bufferPtrs);
+
+		for (size_t c = 0; c < channels; ++c) {
+
+			RingBuffer<float> *wb = getWriteRingBuffer(c);
+			if (wb) {
+				size_t actual = wb->write(bufferPtrs[c], got);
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    std::cout << "Resampled " << got << " frames to " << toCopy << " frames" << std::endl;
+			std::cout << "Wrote " << actual << " samples for ch " << c << ", now "
+				  << wb->getReadSpace() << " to read" 
+				  << std::endl;
 #endif
-	}
-	
-	for (size_t c = 0; c < channels; ++c) {
-	    for (size_t i = 0; i < toCopy; ++i) {
-		tmp[i] = srcout[channels * i + c];
-	    }
-	    RingBuffer<float> *wb = getWriteRingBuffer(c);
-	    if (wb) wb->write(tmp, toCopy);
-	}
+				if (actual < got) {
+					std::cerr << "WARNING: Buffer overrun in channel " << c
+							  << ": wrote " << actual << " of " << got
+							  << " samples" << std::endl;
+				}
+			}
+		}
 
-	m_writeBufferFill = f;
-	if (readWriteEqual) m_readBufferFill = f;
+		m_writeBufferFill = f;
+		if (readWriteEqual) m_readBufferFill = f;
 
-    } else {
-
-	// space must be a multiple of generatorBlockSize
-	space = (space / generatorBlockSize) * generatorBlockSize;
-	if (space == 0) return false;
-
-	if (tmpSize < channels * space) {
-	    delete[] tmp;
-	    tmp = new float[channels * space];
-	    tmpSize = channels * space;
-	}
-
-	for (size_t c = 0; c < channels; ++c) {
-
-	    bufferPtrs[c] = tmp + c * space;
-	    
-	    for (size_t i = 0; i < space; ++i) {
-		tmp[c * space + i] = 0.0f;
-	    }
-	}
-
-	size_t got = mixModels(f, space, bufferPtrs);
-
-	for (size_t c = 0; c < channels; ++c) {
-
-	    RingBuffer<float> *wb = getWriteRingBuffer(c);
-	    if (wb) {
-                size_t actual = wb->write(bufferPtrs[c], got);
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-		std::cout << "Wrote " << actual << " samples for ch " << c << ", now "
-			  << wb->getReadSpace() << " to read" 
-			  << std::endl;
-#endif
-                if (actual < got) {
-                    std::cerr << "WARNING: Buffer overrun in channel " << c
-                              << ": wrote " << actual << " of " << got
-                              << " samples" << std::endl;
-                }
-            }
-	}
-
-	m_writeBufferFill = f;
-	if (readWriteEqual) m_readBufferFill = f;
-
-	//!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
+		//!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
     }
 
     return true;
@@ -1204,126 +1208,126 @@
 #endif
 
     if (chunkBufferPtrCount < channels) {
-	if (chunkBufferPtrs) delete[] chunkBufferPtrs;
-	chunkBufferPtrs = new float *[channels];
-	chunkBufferPtrCount = channels;
+		if (chunkBufferPtrs) delete[] chunkBufferPtrs;
+		chunkBufferPtrs = new float *[channels];
+		chunkBufferPtrCount = channels;
     }
 
     for (size_t c = 0; c < channels; ++c) {
-	chunkBufferPtrs[c] = buffers[c];
+		chunkBufferPtrs[c] = buffers[c];
     }
 
     while (processed < count) {
 	
-	chunkSize = count - processed;
-	nextChunkStart = chunkStart + chunkSize;
-	selectionSize = 0;
+		chunkSize = count - processed;
+		nextChunkStart = chunkStart + chunkSize;
+		selectionSize = 0;
 
-	size_t fadeIn = 0, fadeOut = 0;
+		size_t fadeIn = 0, fadeOut = 0;
 
-	if (constrained) {
-	    
-	    Selection selection =
-		m_viewManager->getContainingSelection(chunkStart, true);
-	    
-	    if (selection.isEmpty()) {
-		if (looping) {
-		    selection = *m_viewManager->getSelections().begin();
-		    chunkStart = selection.getStartFrame();
-		    fadeIn = 50;
+		if (constrained) {
+		    
+			Selection selection =
+			m_viewManager->getContainingSelection(chunkStart, true);
+		    
+			if (selection.isEmpty()) {
+				if (looping) {
+					selection = *m_viewManager->getSelections().begin();
+					chunkStart = selection.getStartFrame();
+					fadeIn = 50;
+				}
+			}
+
+			if (selection.isEmpty()) {
+
+				chunkSize = 0;
+				nextChunkStart = chunkStart;
+
+			} else {
+
+				selectionSize =
+					selection.getEndFrame() -
+					selection.getStartFrame();
+
+				if (chunkStart < selection.getStartFrame()) {
+					chunkStart = selection.getStartFrame();
+					fadeIn = 50;
+				}
+
+				nextChunkStart = chunkStart + chunkSize;
+
+				if (nextChunkStart >= selection.getEndFrame()) {
+					nextChunkStart = selection.getEndFrame();
+					fadeOut = 50;
+				}
+
+				chunkSize = nextChunkStart - chunkStart;
+			}
+		
+		} else if (looping && m_lastModelEndFrame > 0) {
+
+			if (chunkStart >= m_lastModelEndFrame) {
+				chunkStart = 0;
+			}
+			if (chunkSize > m_lastModelEndFrame - chunkStart) {
+				chunkSize = m_lastModelEndFrame - chunkStart;
+			}
+			nextChunkStart = chunkStart + chunkSize;
 		}
-	    }
+		
+	//	std::cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << std::endl;
 
-	    if (selection.isEmpty()) {
-
-		chunkSize = 0;
-		nextChunkStart = chunkStart;
-
-	    } else {
-
-		selectionSize =
-		    selection.getEndFrame() -
-		    selection.getStartFrame();
-
-		if (chunkStart < selection.getStartFrame()) {
-		    chunkStart = selection.getStartFrame();
-		    fadeIn = 50;
+		if (!chunkSize) {
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+			std::cout << "Ending selection playback at " << nextChunkStart << std::endl;
+#endif
+			// We need to maintain full buffers so that the other
+			// thread can tell where it's got to in the playback -- so
+			// return the full amount here
+			frame = frame + count;
+			return count;
 		}
 
-		nextChunkStart = chunkStart + chunkSize;
+#ifdef DEBUG_AUDIO_PLAY_SOURCE
+		std::cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << std::endl;
+#endif
 
-		if (nextChunkStart >= selection.getEndFrame()) {
-		    nextChunkStart = selection.getEndFrame();
-		    fadeOut = 50;
+		size_t got = 0;
+
+		if (selectionSize < 100) {
+			fadeIn = 0;
+			fadeOut = 0;
+		} else if (selectionSize < 300) {
+			if (fadeIn > 0) fadeIn = 10;
+			if (fadeOut > 0) fadeOut = 10;
 		}
 
-		chunkSize = nextChunkStart - chunkStart;
-	    }
-	
-	} else if (looping && m_lastModelEndFrame > 0) {
+		if (fadeIn > 0) {
+			if (processed * 2 < fadeIn) {
+				fadeIn = processed * 2;
+			}
+		}
 
-	    if (chunkStart >= m_lastModelEndFrame) {
-		chunkStart = 0;
-	    }
-	    if (chunkSize > m_lastModelEndFrame - chunkStart) {
-		chunkSize = m_lastModelEndFrame - chunkStart;
-	    }
-	    nextChunkStart = chunkStart + chunkSize;
-	}
-	
-//	std::cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << std::endl;
+		if (fadeOut > 0) {
+			if ((count - processed - chunkSize) * 2 < fadeOut) {
+				fadeOut = (count - processed - chunkSize) * 2;
+			}
+		}
 
-	if (!chunkSize) {
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    std::cout << "Ending selection playback at " << nextChunkStart << std::endl;
-#endif
-	    // We need to maintain full buffers so that the other
-	    // thread can tell where it's got to in the playback -- so
-	    // return the full amount here
-	    frame = frame + count;
-	    return count;
-	}
+		for (std::set<Model *>::iterator mi = m_models.begin();
+			 mi != m_models.end(); ++mi) {
+		    
+			got = m_audioGenerator->mixModel(*mi, chunkStart, 
+							 chunkSize, chunkBufferPtrs,
+							 fadeIn, fadeOut);
+		}
 
-#ifdef DEBUG_AUDIO_PLAY_SOURCE
-	std::cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << std::endl;
-#endif
+		for (size_t c = 0; c < channels; ++c) {
+			chunkBufferPtrs[c] += chunkSize;
+		}
 
-	size_t got = 0;
-
-	if (selectionSize < 100) {
-	    fadeIn = 0;
-	    fadeOut = 0;
-	} else if (selectionSize < 300) {
-	    if (fadeIn > 0) fadeIn = 10;
-	    if (fadeOut > 0) fadeOut = 10;
-	}
-
-	if (fadeIn > 0) {
-	    if (processed * 2 < fadeIn) {
-		fadeIn = processed * 2;
-	    }
-	}
-
-	if (fadeOut > 0) {
-	    if ((count - processed - chunkSize) * 2 < fadeOut) {
-		fadeOut = (count - processed - chunkSize) * 2;
-	    }
-	}
-
-	for (std::set<Model *>::iterator mi = m_models.begin();
-	     mi != m_models.end(); ++mi) {
-	    
-	    got = m_audioGenerator->mixModel(*mi, chunkStart, 
-					     chunkSize, chunkBufferPtrs,
-					     fadeIn, fadeOut);
-	}
-
-	for (size_t c = 0; c < channels; ++c) {
-	    chunkBufferPtrs[c] += chunkSize;
-	}
-
-	processed += chunkSize;
-	chunkStart = nextChunkStart;
+		processed += chunkSize;
+		chunkStart = nextChunkStart;
     }
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
@@ -1412,59 +1416,59 @@
 
     while (!s.m_exiting) {
 
-	s.unifyRingBuffers();
-	s.m_bufferScavenger.scavenge();
-        s.m_pluginScavenger.scavenge();
-	s.m_timeStretcherScavenger.scavenge();
+		s.unifyRingBuffers();
+		s.m_bufferScavenger.scavenge();
+		s.m_pluginScavenger.scavenge();
+		s.m_timeStretcherScavenger.scavenge();
 
-	if (work && s.m_playing && s.getSourceSampleRate()) {
-	    
+		if (work && s.m_playing && s.getSourceSampleRate()) {
+		    
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    std::cout << "AudioCallbackPlaySourceFillThread: not waiting" << std::endl;
+			std::cout << "AudioCallbackPlaySourceFillThread: not waiting" << std::endl;
 #endif
 
-	    s.m_mutex.unlock();
-	    s.m_mutex.lock();
+			s.m_mutex.unlock();
+			s.m_mutex.lock();
 
-	} else {
-	    
-	    float ms = 100;
-	    if (s.getSourceSampleRate() > 0) {
-		ms = float(m_ringBufferSize) / float(s.getSourceSampleRate()) * 1000.0;
-	    }
-	    
-	    if (s.m_playing) ms /= 10;
+		} else {
+		    
+			float ms = 100;
+			if (s.getSourceSampleRate() > 0) {
+				ms = float(m_ringBufferSize) / float(s.getSourceSampleRate()) * 1000.0;
+			}
+		    
+			if (s.m_playing) ms /= 10;
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-            if (!s.m_playing) std::cout << std::endl;
-	    std::cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << std::endl;
+				if (!s.m_playing) std::cout << std::endl;
+			std::cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << std::endl;
 #endif
-	    
-	    s.m_condition.wait(&s.m_mutex, size_t(ms));
-	}
+		    
+			s.m_condition.wait(&s.m_mutex, size_t(ms));
+		}
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-	std::cout << "AudioCallbackPlaySourceFillThread: awoken" << std::endl;
+		std::cout << "AudioCallbackPlaySourceFillThread: awoken" << std::endl;
 #endif
 
-	work = false;
+		work = false;
 
-	if (!s.getSourceSampleRate()) continue;
+		if (!s.getSourceSampleRate()) continue;
 
-	bool playing = s.m_playing;
+		bool playing = s.m_playing;
 
-	if (playing && !previouslyPlaying) {
+		if (playing && !previouslyPlaying) {
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
-	    std::cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << std::endl;
+			std::cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << std::endl;
 #endif
-	    for (size_t c = 0; c < s.getTargetChannelCount(); ++c) {
-		RingBuffer<float> *rb = s.getReadRingBuffer(c);
-		if (rb) rb->reset();
-	    }
-	}
-	previouslyPlaying = playing;
+			for (size_t c = 0; c < s.getTargetChannelCount(); ++c) {
+				RingBuffer<float> *rb = s.getReadRingBuffer(c);
+				if (rb) rb->reset();
+			}
+		}
+		previouslyPlaying = playing;
 
-	work = s.fillBuffers();
+		work = s.fillBuffers();
     }
 
     s.m_mutex.unlock();
@@ -1474,7 +1478,7 @@
 {
 	if (!m_filterStack) return;
 
-	size_t required = m_filterStack->getRequiredInputSamples(count);
+/*	size_t required = m_filterStack->getRequiredInputSamples(count);
 
 	if (required <= count)
 	{
@@ -1485,7 +1489,7 @@
 		size_t missing = required - count;
 
 		size_t channels = getTargetChannelCount();
-
+		
 		size_t got = required;
 
 		float **ib = (float**) malloc(channels*sizeof(float*));
@@ -1498,7 +1502,7 @@
 			}
             RingBuffer<float> *rb = getReadRingBuffer(c);
             if (rb) {
-				size_t gotHere = rb->peek(ib[c]+count, missing);
+				size_t gotHere = rb->peek(ib[c]+count, missing); //should be got not missing parameter !!!!
 				if (gotHere < got)
 					got = gotHere;
 			}
@@ -1518,5 +1522,74 @@
 		delete ib;
 	}
 	m_filterStack->getOutput(buffers, count);
+*/
 
+	size_t required = m_filterStack->getRequiredInputSamples(count);
+
+	size_t channels = getTargetChannelCount();
+
+	size_t got = required;
+
+	//if no filters are available
+	if (required == 0)
+	{
+		got = count;
+		for (size_t ch = 0; ch < channels; ++ch) 
+		{
+			RingBuffer<float> *rb = getReadRingBuffer(ch);
+			if (rb) {
+				size_t gotHere = rb->read(buffers[ch], got);
+				if (gotHere < got)
+					got = gotHere;
+		    }
+
+			for (size_t ch = 0; ch < channels; ++ch) {
+				for (size_t i = got; i < count; ++i) {
+					buffers[ch][i] = 0.0;
+				}
+			}
+		}
+		return;
+	}
+
+	float **ib = (float**) malloc(channels*sizeof(float*));
+
+	for (size_t c = 0; c < channels; ++c) 
+	{
+		ib[c] = (float*) malloc(required*sizeof(float));
+		RingBuffer<float> *rb = getReadRingBuffer(c);
+        if (rb) {
+			size_t gotHere = rb->peek(ib[c], got);
+			if (gotHere < got)
+				got = gotHere;
+		}
+	}
+	if (got < required)
+	{
+		std::cerr << "ERROR applyRealTimeFilters(): Read underrun in playback ("
+              << got << " < " << required << ")" << std::endl;
+		return; 
+	}
+
+    m_filterStack->putInput(ib, required);
+
+	m_filterStack->getOutput(buffers, count);
+
+	//move the read pointer
+	got = m_filterStack->getRequiredSkipSamples();
+	for (size_t c = 0; c < channels; ++c) 
+	{
+		RingBuffer<float> *rb = getReadRingBuffer(c);
+        if (rb) {
+			size_t gotHere = rb->skip(got);
+			if (gotHere < got)
+				got = gotHere;
+		}
+	}
+	
+	//delete
+	for (size_t c = 0; c < channels; ++c) {
+		delete ib[c];
+	}
+	delete ib;
 }
\ No newline at end of file
--- a/sv/filter/FilterStack.cpp	Tue Sep 04 07:54:09 2007 +0000
+++ b/sv/filter/FilterStack.cpp	Wed Sep 12 08:47:36 2007 +0000
@@ -13,6 +13,8 @@
  
 #include "FilterStack.h"
 
+#include "TimeStretchFilter.h"
+
 FilterStack::FilterStack() : QObject()
 {}
 
@@ -130,6 +132,25 @@
 	return max;
 }
 
+size_t FilterStack::getRequiredSkipSamples()
+{
+	size_t skip = 1024;
+
+	std::map<int, Filter *>::iterator iter;
+
+	for (iter = m_filters.begin(); iter != m_filters.end(); iter++)
+	{
+		// find the time filter
+		Filter * filter = iter->second;
+		if (filter->objectName() == "Pitch-Time Stretching")
+		{
+			TimeStretchFilter * timefilter = (TimeStretchFilter *) filter;
+			skip = timefilter->getRequiredSkipSamples();
+		}
+	}
+	return skip;
+}
+
 void FilterStack::setSourceChannelCount(size_t channel)
 {
 	m_sourceChannelCount = channel;
--- a/sv/filter/FilterStack.h	Tue Sep 04 07:54:09 2007 +0000
+++ b/sv/filter/FilterStack.h	Wed Sep 12 08:47:36 2007 +0000
@@ -38,6 +38,7 @@
 	QString getUniqueFilterName(QString candidate);
 
 	size_t getRequiredInputSamples(size_t outputSamplesNeeded);
+	size_t getRequiredSkipSamples();
 
 	void setSourceChannelCount(size_t channel);
 
--- a/sv/filter/TimeStretchFilter.cpp	Tue Sep 04 07:54:09 2007 +0000
+++ b/sv/filter/TimeStretchFilter.cpp	Wed Sep 12 08:47:36 2007 +0000
@@ -56,7 +56,8 @@
 	m_transcheck(false),
 	m_peakcheck(false),
 	m_framesize(4096),
-	m_interpfactor(1)
+	m_interpfactor(1),
+	m_transhold(0)
 {
 	m_hop = m_framesize/4;
 
@@ -124,7 +125,7 @@
 TimeStretchFilter::PropertyList TimeStretchFilter::getProperties() const
 {
 	PropertyList list;
-    //list.push_back("Time");
+    list.push_back("Time");
 	list.push_back("Pitch");
 	list.push_back("Bypass");
 	list.push_back("Transdetect");
@@ -242,22 +243,23 @@
 	float interpsample;
 
 	bool drum = 0;
-	float drumthresh = 65;
-	int transhold = 0;
+	float drumthresh = 65;
 
-	if (samples < floor(m_framesize*m_interpfactor + 1))
+	int currentposition = m_hop+1;
+
+	if (samples < floor((m_framesize-1)*m_interpfactor + 1 + 1025))
 		return;
 
 	int channel = getSourceChannelCount();
 
 	for (int i=0; i<samples; i++){
 		if (channel > 1)
-			m_inputBuffer[i] = (input[0][i] + input[1][i]) /2;
+			m_inputBuffer[i] = input[0][i];// + input[1][i]) /2;
 		else
 			m_inputBuffer[i] = input[0][i];
 	}
 	
-	for (int i = 0; i<(m_framesize); i++)
+	for (int i = 0; i<m_framesize; i++)
 	{
 			
 		//This block was specifically written to do resampling interpolation for crude pitch shifting
@@ -269,39 +271,43 @@
 			difratio = (double(i*m_interpfactor)) - floor(double(i*m_interpfactor));
 			
 			// this block loads a frame as normal
-			sampdiff=m_inputBuffer[dd+1]-m_inputBuffer[dd];
-			interpsample = (difratio*sampdiff)+m_inputBuffer[dd];
-			audioframe[i] = (interpsample)*window[i];
+			sampdiff=m_inputBuffer[dd+currentposition+1]-m_inputBuffer[dd+currentposition];
+			interpsample = (difratio*sampdiff)+m_inputBuffer[dd+currentposition];
+			audioframe[i] = interpsample*window[i];
+
+			sampdiff=m_inputBuffer[dd+currentposition+1-m_hop]-m_inputBuffer[dd+currentposition-m_hop];
+			interpsample = (difratio*sampdiff)+m_inputBuffer[dd+currentposition-m_hop];
+			prev_audioframe[i] = interpsample*window[i];
 		}
 		else {
-			audioframe[i] = (m_inputBuffer[i+1])*window[i];
-			processedframe[i] = (audioframe[i])*window[i];
+			audioframe[i] = m_inputBuffer[i+currentposition+1]*window[i];
+			processedframe[i] = audioframe[i]*window[i];
 		}
 	}
 	
-	FFTReal fft_object (m_framesize);
+	FFTReal fft_object(m_framesize);
 			
 	if (m_bypass == false)
 	{ 
-		fft_object.do_fft (FFTframe,audioframe);
+		fft_object.do_fft(FFTframe,audioframe);
 	
 		cart2pol(FFTframe, c_mags, c_phase, m_framesize);
 
 		//--------------------------------------------
 	
-		fft_object.do_fft (FFTframe,prev_audioframe);
+		fft_object.do_fft(FFTframe,prev_audioframe);
 
 		cart2pol(FFTframe, p_mags, p_phase, m_framesize);
 		
-		drum=transient_detect(c_mags, c_mags, p_mags, p_mags, drumthresh, m_framesize);
+		drum = transient_detect(c_mags, c_mags, p_mags, p_mags, drumthresh, m_framesize);
 	
 	
 		if (m_transcheck)
 		{
 	
-			if (drum && transhold==0){
+			if (drum && m_transhold==0){
 				cur2last(c_phase, c_synthphase, p_synthphase, m_framesize);
-				transhold=4;
+				m_transhold=4;
 			}
 			else{
 				if(m_peakcheck){
@@ -322,16 +328,16 @@
 			}
 		}
 	
-		if(transhold != 0){
-			transhold=transhold-1;
+		if(m_transhold != 0){
+			m_transhold = m_transhold - 1;
 		}
 	
 		drum = 0;
 		
 		pol2cart(FFTframe, c_mags, c_synthphase, m_framesize);
 		
-		fft_object.do_ifft (FFTframe,processedframe);
-		fft_object.rescale (processedframe); //VIP######## I have edited this function to do rewindowing also######
+		fft_object.do_ifft(FFTframe, processedframe);
+		fft_object.rescale(processedframe); //VIP######## I have edited this function to do rewindowing also######
 	}
 
 	for (int p = 0; p<(m_framesize); p++){
@@ -355,11 +361,6 @@
 			holdbuffer3[j]=processedframe[j+(m_framesize/4)];
 		}
 	}
-	
-	for (int i = 0; i<(m_framesize); i++)
-	{
-		prev_audioframe[i] = audioframe[i];
-	}
 }
 
 void TimeStretchFilter::getOutput(float **output, size_t samples)
@@ -378,8 +379,22 @@
 
 size_t TimeStretchFilter::getRequiredInputSamples(size_t outputSamplesNeeded)
 {
-	// max (m_framesize, outputSamplesNeeded*2)
-	size_t need = max( floor(m_framesize*m_interpfactor + 1), outputSamplesNeeded*2);
-	
+	size_t need = floor((m_framesize-1)*m_interpfactor + 1 + m_hop + 1);
 	return need;
+}
+
+size_t TimeStretchFilter::getRequiredSkipSamples()
+{
+	size_t skip = 1024;
+
+	if (m_bypass == false && m_transhold==0)
+	{
+			skip = floor(m_hop*hopfactor);
+	}
+	else
+	{		
+			skip = m_hop;
+	}
+
+	return skip;
 }
\ No newline at end of file
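
Editor's note (not part of the changeset): the new TimeStretchFilter::getRequiredSkipSamples() is the mechanism that makes this filter the master of the read pointer. With the values visible in this file (m_framesize = 4096, so m_hop = m_framesize/4 = 1024), each processing pass emits one hop of output but tells the caller to skip only floor(m_hop * hopfactor) input samples while stretching, or a full m_hop when bypassed or holding a transient. hopfactor itself is not defined in the hunks shown; assuming, purely for illustration, hopfactor = 0.5, the reader would advance 512 input samples per 1024 output samples, i.e. roughly a 2x time stretch, while hopfactor = 1.0 would reduce to a straight pass-through advance.
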
--- a/sv/filter/TimeStretchFilter.h	Tue Sep 04 07:54:09 2007 +0000
+++ b/sv/filter/TimeStretchFilter.h	Wed Sep 12 08:47:36 2007 +0000
@@ -38,6 +38,7 @@
 	virtual void getOutput(float **output, size_t samples);
 
 	virtual size_t getRequiredInputSamples(size_t outputSamplesNeeded);
+	size_t getRequiredSkipSamples();
 
 protected:
 
@@ -48,6 +49,8 @@
 	size_t	m_framesize;
 	int		m_hop;
 
+	int		m_transhold;
+
 	float *m_inputBuffer;
 
 	float m_interpfactor;