changeset 82:8ebc85f6ce4e

integrate Pitch-Time scaling filter from DIT
author lbajardsilogic
date Fri, 22 Jun 2007 09:54:00 +0000
parents 82c960dc79f7
children 76d4fbab5f20
files sv/audioio/AudioCallbackPlaySource.cpp sv/audioio/AudioPortAudioTarget.cpp sv/filter/DSP.cpp sv/filter/Filter.h sv/filter/FilterStack.cpp sv/filter/FilterStack.h sv/filter/RealTimeFilterFactory.cpp sv/filter/TimeStretchFilter.cpp sv/filter/TimeStretchFilter.h
diffstat 9 files changed, 221 insertions(+), 173 deletions(-) [+]
line wrap: on
line diff
--- a/sv/audioio/AudioCallbackPlaySource.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/audioio/AudioCallbackPlaySource.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -45,7 +45,7 @@
     m_bufferScavenger(1),
     m_sourceChannelCount(0),
     m_blockSize(1024),
-    m_sourceSampleRate(0),
+	m_sourceSampleRate(0),
     m_targetSampleRate(0),
     m_playLatency(0),
     m_playing(false),
@@ -1472,7 +1472,49 @@
 {
 	if (!m_filterStack) return;
 
-	m_filterStack->putInput(buffers, count);
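+	// The filter stack may need more input samples than this block provides
+	// (e.g. when time-stretching), so ask how many it wants and peek the
+	// shortfall from the playback ring buffers without consuming it.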
+	size_t required = m_filterStack->getRequiredInputSamples(count);
+
+	if (required <= count)
+	{
+		m_filterStack->putInput(buffers, count);
+		
+	} else 
+	{
+		size_t missing = required - count;
+
+		size_t channels = getTargetChannelCount();
+
+		size_t got = required;
+
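+		// Assemble per-channel temporary buffers: the block we were given,
+		// followed by the 'missing' samples peeked from each channel's
+		// read ring buffer.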
+		float **ib = (float**) malloc(channels*sizeof(float*));
+        
+        for (size_t c = 0; c < channels; ++c) {
+			ib[c] = (float*) malloc(required*sizeof(float));
+			for (int i=0; i<count; i++)
+			{
+				ib[c][i] = buffers[c][i];
+			}
+            RingBuffer<float> *rb = getReadRingBuffer(c);
+            if (rb) {
+				size_t gotHere = rb->peek(ib[c]+count, missing);
+				if (gotHere < got)
+					got = gotHere;
+			}
+        }
+		if (got < missing)
+		{
+			std::cerr << "ERROR applyRealTimeFilters(): Read underrun in playback ("
+                      << got << " < " << missing << ")" << std::endl;
+			return; 
+		}
+
+        m_filterStack->putInput(ib, required);
+
+		for (size_t c = 0; c < channels; ++c) {
+			free(ib[c]);
+		}
+		free(ib);
+	}
 	m_filterStack->getOutput(buffers, count);
 
 }
\ No newline at end of file
--- a/sv/audioio/AudioPortAudioTarget.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/audioio/AudioPortAudioTarget.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -48,7 +48,7 @@
     }
 
     m_bufferSize = 1024;
-    m_sampleRate = 44100;
+	m_sampleRate = 44100;
     if (m_source && (m_source->getSourceSampleRate() != 0)) {
 	m_sampleRate = m_source->getSourceSampleRate();
     }
--- a/sv/filter/DSP.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/DSP.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -72,15 +72,11 @@
 
 void hanning(float* window, int framesize)
 {
-	
 	for (int f = 0; f<framesize; f++)
 	{
 		
 		window[f]= (0.5*(1-cos(2*PI*(f+1)/(framesize+1))));
-	
 	}
-
-
 }
 
 void updatephases(float* c_phase,float* p_phase,float* c_synthphase,float* p_synthphase, int framesize,float hopfactor,float interpfactor)
--- a/sv/filter/Filter.h	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/Filter.h	Fri Jun 22 09:54:00 2007 +0000
@@ -40,6 +40,7 @@
      */
     virtual void getOutput(float **output, size_t samples) = 0;
 
+	/**
+	 * Return the number of input samples that must be passed to putInput()
+	 * so that getOutput() can produce outputSamplesNeeded samples of output.
+	 */
+	virtual size_t getRequiredInputSamples(size_t outputSamplesNeeded) = 0;
 
 protected:
 
--- a/sv/filter/FilterStack.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/FilterStack.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -105,4 +105,22 @@
 		Filter * filter = m_filters.begin()->second;
 		removeFilter(filter->objectName());
 	}
+}
+
+size_t FilterStack::getRequiredInputSamples(size_t outputSamplesNeeded)
+{
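+	// The stack as a whole needs enough input to satisfy its hungriest filter.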
+	size_t max = 0;
+
+	std::map<int, Filter *>::iterator iter;
+
+	for (iter = m_filters.begin(); iter != m_filters.end(); iter++)
+	{
+		Filter * filter = iter->second;
+		size_t required = filter->getRequiredInputSamples(outputSamplesNeeded);
+		if (required > max)
+		{
+			max = required;
+		}
+	}
+	return max;
 }
\ No newline at end of file
--- a/sv/filter/FilterStack.h	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/FilterStack.h	Fri Jun 22 09:54:00 2007 +0000
@@ -37,6 +37,8 @@
 
 	QString getUniqueFilterName(QString candidate);
 
+	size_t getRequiredInputSamples(size_t outputSamplesNeeded);
+
 signals:
 	void newFilterAdded(Filter *);
 	void filterRemoved(QString);
--- a/sv/filter/RealTimeFilterFactory.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/RealTimeFilterFactory.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -42,14 +42,14 @@
 QString RealTimeFilterFactory::getFilterLabel(FilterType type)
 {
 	switch (type) {
-		case TimeStretch: return "Time Stretching";
+		case TimeStretch: return "Pitch-Time Stretching";
 		default: return "unknown";
     }
 }
 
 RealTimeFilterFactory::FilterType RealTimeFilterFactory::getFilterType(QString strType)
 {
-	if (strType == "Time Stretching") {
+	if (strType == "Pitch-Time Stretching") {
 		return TimeStretch;
     }
 	else
--- a/sv/filter/TimeStretchFilter.cpp	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/TimeStretchFilter.cpp	Fri Jun 22 09:54:00 2007 +0000
@@ -12,13 +12,16 @@
 */
 
 #include <math.h>
+#include <iostream>
+#include <algorithm>
 
 #include "TimeStretchFilter.h"
 
 #include  "FFTReal.h"
 #include  "DSP.h"
 
-float *audioframe;
+#include "system/System.h"
+
+/*float *audioframe;
 float *prev_audioframe;
 float *window;
 float *processedframe;
@@ -27,96 +30,78 @@
 float *holdbuffer2;
 float *holdbuffer1;
 
-float *c_mags;		///CURRENT FRAME MAGNITUDES
-float *p_mags;		///PREVIOUS FRAME MAGNITUDES
 float *c_phase;		///CURRENT FRAME phases
 float *p_phase;		///PREVIOUS FRAME phases
 float *c_synthphase;
 float *p_synthphase;
-float *synthframe;
+float *synthframe;*/
+
+float *c_mags;		///CURRENT FRAME MAGNITUDES
+float *p_mags;		///PREVIOUS FRAME MAGNITUDES
 
 float *FFTframe;
-//FFTReal fft_object;
-int framesize = 1024;
-int hop = framesize/4;
-float volume = 1;
-float interpfactor = 1;
-int currentposition = hop+1;
-float *wavdata;
-int dd;
-float sampdiff;
-float difratio;
-float interpsample;
-float maxvalue = 0; 
-
 
 float hopfactor = 1;
+
+//need in DSP.cpp
 float lastfactor;
-int filelength;
-
-char byte1, byte2;
-int samplevalue;
-char *pbyte1 = &byte1;; 
-char *pbyte2 = &byte2;;
-char *buffer;
-int tempval = 0;
-bool drum = 0;
-float drumthresh = 65;
-int transhold = 0;
 
 int numpeaks;
 float *peak_locations; 
+int currentposition = 1024+1;//= hop+1;
+//
 
-TimeStretchFilter::TimeStretchFilter() : Filter()
+TimeStretchFilter::TimeStretchFilter() : Filter(),
+	m_bypass(false),
+	m_transcheck(false),
+	m_peakcheck(false),
+	m_framesize(4096),
+	m_interpfactor(1)
 {
-	m_bypass = true;
-	m_transcheck = false;
-	m_peakcheck = false;
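+	// hop is a quarter of the frame size, for the 75% overlap scheme below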
+	m_hop = m_framesize/4;
 
+	m_inputBuffer = (float *)calloc((m_framesize*2+1), sizeof(float));
+	
 	/**********malloc***********/
-/*	FFTframe=(float *)calloc((framesize), sizeof(float));
+	FFTframe=(float *)calloc((m_framesize), sizeof(float));
 			
 	//This block specifically sets up the buffers required to do a 75% overlap scheme
-	audioframe=(float *)calloc((framesize), sizeof(float));			//The current frame
-	prev_audioframe=(float *)calloc((framesize), sizeof(float));
-	window=(float *)calloc((framesize), sizeof(float));				//Window
-	processedframe=(float *)calloc((framesize), sizeof(float));			//The current frame
-	synthframe=(float *)calloc((framesize), sizeof(float));
-	outbuffer=(float *)calloc((framesize/4), sizeof(float));			//The current output segment which is 1/4 framesize for 75% overlap		
+	audioframe=(float *)calloc((m_framesize), sizeof(float));			//The current frame
+	prev_audioframe=(float *)calloc((m_framesize), sizeof(float));
+	window=(float *)calloc((m_framesize), sizeof(float));				//Window
+	processedframe=(float *)calloc((m_framesize), sizeof(float));			//The current frame
+	synthframe=(float *)calloc((m_framesize), sizeof(float));
+	outbuffer=(float *)calloc((m_framesize/4), sizeof(float));			//The current output segment which is 1/4 framesize for 75% overlap		
 			
-	holdbuffer3=(float *)calloc((framesize*0.75), sizeof(float));	//The hold buffer for the previous frame segment
-	holdbuffer2=(float *)calloc((framesize/2), sizeof(float));		//The fold buffer for the frame segment 2 frames ago
-	holdbuffer1=(float *)calloc((framesize/4), sizeof(float));
+	holdbuffer3=(float *)calloc((m_framesize*0.75), sizeof(float));	//The hold buffer for the previous frame segment
+	holdbuffer2=(float *)calloc((m_framesize/2), sizeof(float));		//The fold buffer for the frame segment 2 frames ago
+	holdbuffer1=(float *)calloc((m_framesize/4), sizeof(float));
 
-	c_mags=(float *)calloc((framesize/2), sizeof(float));			//The magnitude and phase arrays
-	p_mags=(float *)calloc((framesize/2), sizeof(float));
-	c_phase=(float *)calloc((framesize/2), sizeof(float));
-	p_phase=(float *)calloc((framesize/2), sizeof(float));
-	c_synthphase=(float *)calloc((framesize/2), sizeof(float));
-	p_synthphase=(float *)calloc((framesize/2), sizeof(float));
+	c_mags=(float *)calloc((m_framesize/2), sizeof(float));			//The magnitude and phase arrays
+	p_mags=(float *)calloc((m_framesize/2), sizeof(float));
+	c_phase=(float *)calloc((m_framesize/2), sizeof(float));
+	p_phase=(float *)calloc((m_framesize/2), sizeof(float));
+	c_synthphase=(float *)calloc((m_framesize/2), sizeof(float));
+	p_synthphase=(float *)calloc((m_framesize/2), sizeof(float));
 
-	peak_locations=(float *)calloc((framesize/2), sizeof(float));
-
-	buffer=(char *)calloc((framesize/2), sizeof(char));
-
-	wavdata = (float*)calloc((framesize*2), sizeof(float));
+	peak_locations=(float *)calloc((m_framesize/2), sizeof(float));
 
-	hanning(window, framesize);
-*/
+	hanning(window, m_framesize);
+
 	/***************************/
 }
 
 TimeStretchFilter::~TimeStretchFilter()
 {
 	/**********de-alloc***********/
-/*	delete FFTframe;
+	delete m_inputBuffer;
+	delete FFTframe;
 			
 	delete audioframe;		
 	delete prev_audioframe;
 	delete window;			
 	delete processedframe;	
 	delete synthframe;
-	//delete outbuffer;	
 			
 	delete holdbuffer3;
 	delete holdbuffer2;
@@ -131,10 +116,8 @@
 
 	delete peak_locations;
 
-	delete buffer;
+	delete outbuffer;	
 
-	delete outbuffer;	
-*/
 	/***************************/
 }
 
@@ -176,20 +159,31 @@
 	int val = 0;
 
     if (name == "Time") {
-
 		if (min) *min = -100;
 		if (max) *max = 100;
 		if (deflt) *deflt = 0;
-
 	}
 
 	if (name == "Pitch") {
-
 		if (min) *min = -100;
 		if (max) *max = 100;
 		if (deflt) *deflt = 0;
+	}
 
-	}
+	if (name == "Bypass") {
+        if (deflt) *deflt = 0;
+		val = (m_bypass ? 1 : 0);
+    }
+
+	if (name == "Transdetect") {
+        if (deflt) *deflt = 0;
+		val = (m_transcheck ? 1 : 0);
+    }
+
+	if (name == "Peaklock") {
+        if (deflt) *deflt = 0;
+		val = (m_peakcheck ? 1 : 0);
+    }
 
     return val;
 }
@@ -209,26 +203,26 @@
 void TimeStretchFilter::setProperty(const PropertyName &name, int value)
 {
     if (name == "Time") {
-		int tmaxfactor=2;
+		float tmaxfactor=2;
 		if (value > 0){
-			hopfactor=1+((tmaxfactor-1)*(value/100));
+			hopfactor=1.0+((tmaxfactor-1)*(((float)value)/100));
 		}
 		if (value < 0){
-			hopfactor=1/(1+((tmaxfactor-1)*((-value)/100)));
+			hopfactor=1.0/(1.0+((tmaxfactor-1)*(-((float)value)/100)));
 		}
 		if(value == 0){
 			hopfactor=1;
 		}
 	} else if (name == "Pitch") {
-		int pmaxfactor=2;
+		float pmaxfactor=2;
 		if (value > 0){
-			interpfactor=1+((pmaxfactor-1)*(value/100));
+			m_interpfactor=1.0+((pmaxfactor-1)*(((float)value)/100));
 		}
 		if (value < 0){
-			interpfactor=1/(1+((pmaxfactor-1)*((-value)/100)));
+			m_interpfactor=1.0/(1.0+((pmaxfactor-1)*(-((float)value)/100)));
 		}
 		if(value == 0){
-			interpfactor=1;
+			m_interpfactor=1;
 		}
 	} else if (name == "Bypass"){
 		m_bypass = (value > 0) ? true : false;
@@ -241,19 +235,29 @@
 }
 
 void TimeStretchFilter::putInput(float **input, size_t samples)
-{
-/*	int i;
+{	
+	int dd;
+	float sampdiff;
+	float difratio;
+	float interpsample;
+
+	bool drum = 0;
+	float drumthresh = 65;
+	int transhold = 0;
+
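+	// Not enough input for one interpolated analysis frame: leave state untouched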
+	if (samples < floor(m_framesize*m_interpfactor + 1))
+		return;
+
 	int channel = 2;
-
-	for (i=0; i<framesize; i++){
-		wavdata[2*i]=input[0][i];
-		wavdata[2*i+1]=input[1][i];
-		//wavdata[2*i+1]=input[0][i];
+
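+	// Downmix the incoming block to mono into m_inputBuffer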
+	for (int i=0; i<samples; i++){
+		if (channel > 1)
+			m_inputBuffer[i] = (input[0][i] + input[1][i]) /2;
+		else
+			m_inputBuffer[i] = input[0][i];
 	}
-
-	currentposition=hop+1;
-
-	for (int i = 0; i<(framesize); i++)
+	
+	for (int i = 0; i<(m_framesize); i++)
 	{
 			
 		//This block was specifically written to do resampling interpolation for crude pitch shifting
@@ -261,82 +265,60 @@
 		//At
 		
 		if (m_bypass == false) {
-			dd = floor(double(i*interpfactor));
-			difratio = (double(i*interpfactor)) - floor(double(i*interpfactor));
-		
-		
+			dd = floor(double(i*m_interpfactor));
+			difratio = (double(i*m_interpfactor)) - floor(double(i*m_interpfactor));
+			
 			// this block loads a frame as normal
-			sampdiff=wavdata[dd+currentposition+1]-wavdata[dd+currentposition];
-			interpsample = (difratio*sampdiff)+wavdata[dd+currentposition];
-			audioframe[i] = (interpsample*32767*volume)*window[i];
-		
-			// this block loads a frame exactly 1 hop back. This is used only forthe purposes of an efficient 
-			// way to calculate phase differences without having to use hetrodyning as suggested by Dave Dorran
-			sampdiff=wavdata[dd+currentposition+1-hop]-wavdata[dd+currentposition-hop];
-			interpsample = (difratio*sampdiff)+wavdata[dd+currentposition-hop];
-			prev_audioframe[i] = (interpsample*32767*volume)*window[i];
-		
-			//processedframe[i] = (audioframe[i])*(0.5*(1-cos(2*PI*(i)/framesize)));   ///needs to happen after processing
+			sampdiff=m_inputBuffer[dd+1]-m_inputBuffer[dd];
+			interpsample = (difratio*sampdiff)+m_inputBuffer[dd];
+			audioframe[i] = (interpsample)*window[i];
 		}
 		else {
-			audioframe[i] = (wavdata[i+currentposition+1]*32767*volume)*window[i];
+			audioframe[i] = (m_inputBuffer[i+1])*window[i];
 			processedframe[i] = (audioframe[i])*window[i];
 		}
-		//--------------------------------------------------------------------------------------------
-		
-		//calculate time frame stats here
-		if (audioframe[i]> maxvalue){
-			maxvalue=audioframe[i];
-		}	
 	}
-		
-	//This maxvalue is the paeak in the frame. The progress bar is on
-	//a timer event which checks this value in order to have a realtime
-	//display update for the peak meter.
-	tempval = ((maxvalue/32767)*2)*100;
-	maxvalue = maxvalue*.9;
 	
-	//---------------------------------------------------------------------------------
-	FFTReal fft_object (framesize);
+	FFTReal fft_object (m_framesize);
 			
 	if (m_bypass == false)
 	{ 
 		fft_object.do_fft (FFTframe,audioframe);
 	
-		cart2pol(FFTframe, c_mags, c_phase, framesize);
+		cart2pol(FFTframe, c_mags, c_phase, m_framesize);
 
 		//--------------------------------------------
 	
 		fft_object.do_fft (FFTframe,prev_audioframe);
 
-		cart2pol(FFTframe, p_mags, p_phase, framesize);
+		cart2pol(FFTframe, p_mags, p_phase, m_framesize);
 		
-		drum=transient_detect(c_mags, c_mags, p_mags, p_mags, drumthresh, framesize);
+		drum=transient_detect(c_mags, c_mags, p_mags, p_mags, drumthresh, m_framesize);
 	
 	
 		if (m_transcheck)
 		{
 	
 			if (drum && transhold==0){
-				cur2last(c_phase, c_synthphase, p_synthphase, framesize);
+				cur2last(c_phase, c_synthphase, p_synthphase, m_framesize);
 				transhold=4;
 			}
 			else{
 				if(m_peakcheck){
-					rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, framesize, interpfactor);
+					rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_interpfactor);
 				}
 				else{
-					rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, framesize, interpfactor);
+					rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_interpfactor);
 				}
 			}
 		}
 		else
 		{
 			if(m_peakcheck){
-				rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, framesize, interpfactor);
+				rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_interpfactor);
 			}
 			else{
-				rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, framesize, interpfactor);
+				rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_interpfactor);
 			}
 		}
 	
@@ -345,72 +327,59 @@
 		}
 	
 		drum = 0;
-		//---------------------------------------
 		
-		//updatephases2(c_phase, p_phase, c_synthphase, p_synthphase, framesize, hopfactor, interpfactor);
-
-		//calculate freqframe stats here
-
-		//process freq domian here
-		
-		pol2cart(FFTframe, c_mags, c_synthphase, framesize);
+		pol2cart(FFTframe, c_mags, c_synthphase, m_framesize);
 		
 		fft_object.do_ifft (FFTframe,processedframe);
 		fft_object.rescale (processedframe); //VIP######## I have edited this function to do rewindowing also######
 	}
 
-	//-----------------------------------------------------------------------------------
-	
-	for (int p = 0; p<(framesize); p++){
+	for (int p = 0; p<(m_framesize); p++){
 		processedframe[p]=processedframe[p]*window[p];
 	}
 	
-	for (int j = 0; j<(framesize); j++)
+	for (int j = 0; j<(m_framesize); j++)
 	{		
 		//This block deals with the buffers for a 75% overlap scheme		
 		
-		if (j < framesize/4){
+		if (j < m_framesize/4){
 			outbuffer[j]=(processedframe[j]+holdbuffer1[j]+holdbuffer2[j]+holdbuffer3[j])*0.5;
-			holdbuffer1[j]=holdbuffer2[j+(framesize/4)];
+			holdbuffer1[j]=holdbuffer2[j+(m_framesize/4)];
 		}
 
-		if (j < framesize/2){
-			holdbuffer2[j]=holdbuffer3[j+(framesize/4)];
+		if (j < m_framesize/2){
+			holdbuffer2[j]=holdbuffer3[j+(m_framesize/4)];
 		}
 
-		if (j < framesize*0.75){
-			holdbuffer3[j]=processedframe[j+(framesize/4)];
+		if (j < m_framesize*0.75){
+			holdbuffer3[j]=processedframe[j+(m_framesize/4)];
 		}
-
-		if (j < framesize/4){
-			samplevalue = outbuffer[j];
-			intobyte(samplevalue, pbyte1, pbyte2);
-			buffer[j*2] = byte1;
-			buffer[j*2+1] = byte2;
-		}	
-	}
-
-	if (m_bypass == false && transhold==0) {
-		currentposition = currentposition + floor(hop*hopfactor);
-	}
-	else {		
-		currentposition = currentposition + hop;
-	} // LB + floor(hop)
-				
-	if (filelength - currentposition < framesize*2){
-		currentposition=hop+1;
 	}
-	*/
+	
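+	// Keep this frame around so the next call can use it as the previous
+	// frame for the phase-difference calculation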
+	for (int i = 0; i<(m_framesize); i++)
+	{
+		prev_audioframe[i] = audioframe[i];
+	}
 }
 
 void TimeStretchFilter::getOutput(float **output, size_t samples)
 {
-	/*int i;
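+	// outbuffer only holds one hop (m_framesize/4) of processed output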
+	if (samples > m_framesize/4)
+		return;
+
 	int channel = 2;
-
-	for (i=0; i<samples; i++){
-		output[0][i] = outbuffer[i*2];
-		output[1][i] = outbuffer[i*2+1];
-	}*/
+
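+	// Copy the mono result to both output channels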
+	for (int i=0; i<samples; i++){
+		output[0][i] = outbuffer[i];
+		if (channel > 1)
+			output[1][i] = outbuffer[i];
+	}
+}
+
+size_t TimeStretchFilter::getRequiredInputSamples(size_t outputSamplesNeeded)
+{
+	// need a full interpolated analysis frame, or twice the requested output,
+	// whichever is larger
+	size_t need = std::max((size_t)floor(m_framesize*m_interpfactor + 1), outputSamplesNeeded*2);
 	
+	return need;
 }
\ No newline at end of file
--- a/sv/filter/TimeStretchFilter.h	Thu Jun 21 15:37:55 2007 +0000
+++ b/sv/filter/TimeStretchFilter.h	Fri Jun 22 09:54:00 2007 +0000
@@ -37,15 +37,35 @@
 	virtual void putInput(float **input, size_t samples);
 	virtual void getOutput(float **output, size_t samples);
 
+	virtual size_t getRequiredInputSamples(size_t outputSamplesNeeded);
+
 protected:
 
 	bool m_bypass;
 	bool m_transcheck;
 	bool m_peakcheck;
 		
-
+	size_t	m_framesize;
+	int		m_hop;
 
+	float *m_inputBuffer;
 
+	float m_interpfactor;
+
+	float *audioframe;
+	float *prev_audioframe;
+	float *window;
+	float *processedframe;
+	float *outbuffer;
+	float *holdbuffer3;
+	float *holdbuffer2;
+	float *holdbuffer1;
+
+	float *c_phase;		///CURRENT FRAME phases
+	float *p_phase;		///PREVIOUS FRAME phases
+	float *c_synthphase;
+	float *p_synthphase;
+	float *synthframe;
 
 };