view sv/filter/MultiRealTimeFilter.cpp @ 223:c413e82a4812

reorganise RealTimeFilter for Equalizer integration
author lbajardsilogic
date Mon, 11 Feb 2008 15:17:54 +0000
parents
children 7d5d51145b81
line wrap: on
line source
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*	Sound Access	
		EASAIER client application.	
		Silogic 2007. Laure Bajard. 
	
	Integration of the filter provided by:
	Dublin Institute of Technology - Audio Research Group 2007
	www.audioresearchgroup.com
	Author: Dan Barry

	This program is free software; you can redistribute it and/or    
	modify it under the terms of the GNU General Public License as    
	published by the Free Software Foundation; either version 2 of the    
	License, or (at your option) any later version.  See the file    
	COPYING included with this distribution for more information.
*/

#include <math.h>
#include <cstdlib>
#include <iostream>

#include "MultiRealTimeFilter.h"

#include  "FFTReal.h"
#include  "DSP.h"

#include "system/System.h"
#include "main/MainWindow.h"

// The buffers below were once file-scope globals here; they are now declared
// elsewhere (presumably in DSP.cpp / a header) and only referenced from this
// translation unit.  Kept for reference.
/*float *audioframe;
float *prev_audioframe;
float *window;
float *processedframe;
float *outbuffer;
float *holdbuffer3;
float *holdbuffer2;
float *holdbuffer1;

float *c_phase;		///CURRENT FRAME phases
float *p_phase;		///PREVIOUS FRAME phases
float *c_synthphase;
float *p_synthphase;
float *synthframe;*/

float *c_mags;		// CURRENT frame spectral magnitudes (framesize/2 bins)
float *p_mags;		// PREVIOUS frame spectral magnitudes (framesize/2 bins)

float *FFTframe;	// scratch buffer for FFT input/output (framesize samples)

// Time-stretch hop scaling factor shared with the DSP code; 1 = normal speed.
extern float hopfactor;

// Needed in DSP.cpp -- these globals form an implicit interface with the DSP
// routines (rotatephases, transient_detect, ...); keep their lifetimes tied
// to MultiRealTimeFilter's constructor/destructor.
float lastfactor;

int numpeaks;
float *peak_locations;	// per-bin peak markers used by peak-locked phase rotation
int currentposition;	// running read position in the source, advanced by getRequiredSkipSamples()
//

// Builds the composite real-time filter: creates the time-stretch and
// equalizer sub-filters, wires their enable signals, and allocates every
// buffer needed for the 4096-sample, 75%-overlap phase-vocoder scheme.
// All buffers are calloc'd (zero-initialised); the destructor must release
// them with free().
MultiRealTimeFilter::MultiRealTimeFilter() : Filter(),
	m_framesize(4096),
	m_transhold(0)
{
	m_hop = m_framesize/4;	// 75% overlap => hop is a quarter frame

	currentposition = m_hop;

	m_timeStretchFilter = new TimeStretchFilter();
	m_equalizerFilter = new EqualizerFilter();

	m_filterCollection.push_back(m_timeStretchFilter);
	m_filterCollection.push_back(m_equalizerFilter);

	// Either sub-filter toggling re-evaluates the aggregate enabled state.
	connect(m_timeStretchFilter, SIGNAL(filterEnabled(bool)), this, SLOT(setFilterEnabled(bool)));
	connect(m_equalizerFilter, SIGNAL(filterEnabled(bool)), this, SLOT(setFilterEnabled(bool)));

	// Input buffer is oversized by (maxPitchFactor + 1) frames so that the
	// resampling interpolation in putInput() can read ahead when pitched up.
	m_inputBuffer = (float *)calloc(m_framesize*(m_timeStretchFilter->getMaxPitchFactor()+1), sizeof(float));
	
	/**********malloc***********/
	FFTframe=(float *)calloc((m_framesize), sizeof(float));
			
	//This block specifically sets up the buffers required to do a 75% overlap scheme
	audioframe=(float *)calloc((m_framesize), sizeof(float));			//The current frame
	prev_audioframe=(float *)calloc((m_framesize), sizeof(float));
	window=(float *)calloc((m_framesize), sizeof(float));				//Window
	processedframe=(float *)calloc((m_framesize), sizeof(float));			//The current frame
	synthframe=(float *)calloc((m_framesize), sizeof(float));
	outbuffer=(float *)calloc((m_framesize/4), sizeof(float));			//The current output segment which is 1/4 framesize for 75% overlap		
	
	holdbuffer3=(float *)calloc((m_framesize*0.75), sizeof(float));	//The hold buffer for the previous frame segment
	holdbuffer2=(float *)calloc((m_framesize/2), sizeof(float));		//The fold buffer for the frame segment 2 frames ago
	holdbuffer1=(float *)calloc((m_framesize/4), sizeof(float));

	// Spectral-domain arrays: only framesize/2 bins are kept (real input FFT).
	c_mags=(float *)calloc((m_framesize/2), sizeof(float));			//The magnitude and phase arrays
	p_mags=(float *)calloc((m_framesize/2), sizeof(float));
	c_phase=(float *)calloc((m_framesize/2), sizeof(float));
	p_phase=(float *)calloc((m_framesize/2), sizeof(float));
	c_synthphase=(float *)calloc((m_framesize/2), sizeof(float));
	p_synthphase=(float *)calloc((m_framesize/2), sizeof(float));

	peak_locations=(float *)calloc((m_framesize/2), sizeof(float));

	// Fill the analysis/synthesis window (Hann) once up front.
	hanning(window, m_framesize);

	// Keep the main window's playback-speed display in sync with this filter.
	connect(this, SIGNAL(playSpeedChanged(float)),
		MainWindow::instance(), SLOT(playSpeedChanged(float)));

	hopfactor = 1;	// start at normal speed
	/***************************/
}

// Releases every processing buffer and resets the shared playback speed.
// All buffers were obtained with calloc() in the constructor, so they must
// be released with free(): the previous code used `delete`, which is
// undefined behavior on malloc-family allocations.
MultiRealTimeFilter::~MultiRealTimeFilter()
{
	/**********de-alloc***********/
	free(m_inputBuffer);
	free(FFTframe);
			
	free(audioframe);		
	free(prev_audioframe);
	free(window);			
	free(processedframe);	
	free(synthframe);
			
	free(holdbuffer3);
	free(holdbuffer2);
	free(holdbuffer1);
			
	free(c_mags);
	free(p_mags);
	free(c_phase);
	free(p_phase);
	free(c_synthphase);
	free(p_synthphase);

	free(peak_locations);

	free(outbuffer);	

	// NOTE(review): m_timeStretchFilter and m_equalizerFilter are new'd in
	// the constructor but never deleted here -- confirm whether another
	// owner (e.g. whoever consumes m_filterCollection) releases them,
	// otherwise they leak.

	// Restore normal speed for the rest of the application.
	hopfactor = 1;

	emit playSpeedChanged(1);

	/***************************/
}

// Consumes one batch of input samples and produces the next quarter-frame of
// processed audio in the global `outbuffer` (read back via getOutput()).
//
// Pipeline when time-stretching is active: resample-interpolate the current
// and previous analysis frames (crude pitch shift), FFT both, detect
// transients, rotate synthesis phases (optionally peak-locked), inverse FFT,
// re-window, then fold the result into the 75%-overlap hold buffers.
// In bypass mode the frame is only windowed and overlap-added.
//
// @param input    per-channel sample pointers; only channel 0 is used
//                 (the stereo down-mix on the first branch is commented out).
// @param samples  number of samples available in `input`; must cover at
//                 least framesize*(pitchFactor+1) or the call is a no-op.
//                 NOTE(review): `samples` floats are copied into
//                 m_inputBuffer, which holds framesize*(maxPitchFactor+1)
//                 floats -- confirm callers never pass more than that.
void MultiRealTimeFilter::putInput(float **input, size_t samples)
{	
	int dd;				// integer part of the resampled read position
	float sampdiff;			// difference between adjacent source samples
	float difratio;			// fractional part of the read position
	float interpsample;		// linearly interpolated sample value

	bool drum = 0;			// transient ("drum hit") detected this frame?
	float drumthresh = 65;		// transient-detection threshold

	int delta = m_hop; //the "current position" is shifted of m_hop

	// Not enough buffered input for a full (pitch-scaled) frame: bail out.
	if ( samples < ( (size_t) floor(m_framesize*(m_timeStretchFilter->getPitchFactor() + 1)) ) )
		return;

	int channel = getSourceChannelCount();

	// Copy channel 0 into the mono work buffer.  Both branches are currently
	// identical: the stereo average was disabled (see commented-out code).
	for (int i=0; i< ((int) samples); i++){
		if (channel > 1)
			m_inputBuffer[i] = input[0][i];// + input[1][i]) /2;
		else
			m_inputBuffer[i] = input[0][i];
	}
	
	for (int i = 0; i< ((int) m_framesize); i++)
	{
			
		//This block was specifically written to do resampling interpolation for crude pitch shifting
		//if it's not being used the audioframe line after the else should be used which is also used in bypass mode
		//At
		
		if (m_timeStretchFilter->bypass() == false) {
			// Split the scaled read position i*pitchFactor into integer (dd)
			// and fractional (difratio) parts for linear interpolation.
			dd = floor(double(i*m_timeStretchFilter->getPitchFactor()));
			difratio = (double(i*m_timeStretchFilter->getPitchFactor())) - floor(double(i*m_timeStretchFilter->getPitchFactor()));
			
			// this block loads a frame as normal
			sampdiff=m_inputBuffer[dd+delta+1]-m_inputBuffer[dd+delta];
			interpsample = (difratio*sampdiff)+m_inputBuffer[dd+delta];
			audioframe[i] = interpsample*window[i];

			// Same interpolation one hop earlier: the previous analysis frame.
			sampdiff=m_inputBuffer[dd+delta+1-m_hop]-m_inputBuffer[dd+delta-m_hop];
			interpsample = (difratio*sampdiff)+m_inputBuffer[dd+delta-m_hop];
			prev_audioframe[i] = interpsample*window[i];
		}
		else {
			// Bypass: no resampling, just window the raw frame.  The frame is
			// windowed twice (here and below) to match the analysis+synthesis
			// windowing applied on the processed path.
			audioframe[i] = m_inputBuffer[i+delta+1]*window[i];
			processedframe[i] = audioframe[i]*window[i];
		}
	}
	
	FFTReal fft_object(m_framesize);
			
	if (m_timeStretchFilter->bypass() == false)
	{ 
		// Analyse the current frame into magnitude/phase.
		fft_object.do_fft(FFTframe,audioframe);
	
		cart2pol(FFTframe, c_mags, c_phase, m_framesize);

		//--------------------------------------------
	
		// Analyse the previous frame the same way.
		fft_object.do_fft(FFTframe,prev_audioframe);

		cart2pol(FFTframe, p_mags, p_phase, m_framesize);
		
		// NOTE(review): c_mags and p_mags are each passed twice -- verify
		// against transient_detect's signature; this may have been meant to
		// take phases as well.
		drum = transient_detect(c_mags, c_mags, p_mags, p_mags, drumthresh, m_framesize);
	
	
		if (m_timeStretchFilter->transcheck())
		{
	
			// On a fresh transient, re-seed the synthesis phases from the
			// current analysis phases and hold that for 4 frames to keep the
			// attack sharp instead of smearing it.
			if (drum && m_transhold==0){
				cur2last(c_phase, c_synthphase, p_synthphase, m_framesize);
				m_transhold=4;
			}
			else{
				if(m_timeStretchFilter->peakcheck()){
					rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_timeStretchFilter->getPitchFactor());
				}
				else{
					rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_timeStretchFilter->getPitchFactor());
				}
			}
		}
		else
		{
			// Transient preservation disabled: always rotate phases.
			if(m_timeStretchFilter->peakcheck()){
				rotatephases_peaklocked(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_timeStretchFilter->getPitchFactor());
			}
			else{
				rotatephases(c_phase, p_phase, c_synthphase, p_synthphase, m_framesize, m_timeStretchFilter->getPitchFactor());
			}
		}
	
		// Count down the transient hold window.
		if(m_transhold != 0){
			m_transhold = m_transhold - 1;
		}
	
		drum = 0;
		
		// Back to the time domain with the new synthesis phases.
		pol2cart(FFTframe, c_mags, c_synthphase, m_framesize);
		
		fft_object.do_ifft(FFTframe, processedframe);
		fft_object.rescale(processedframe); //VIP######## I have edited this function to do rewindowing also######
	}

	// Synthesis window before overlap-add.
	for (int p = 0; p < ((int) m_framesize); p++){
		processedframe[p]=processedframe[p]*window[p];
	}
	
	for (int j = 0; j< ((int) m_framesize); j++)
	{		
		//This block deals with the buffers for a 75% overlap scheme		
		
		// First quarter: sum the new frame with the three delayed segments to
		// complete the 4-way overlap-add, then shift holdbuffer2 -> 1.
		if (j < ((int) m_framesize)/4){
			outbuffer[j]=(processedframe[j]+holdbuffer1[j]+holdbuffer2[j]+holdbuffer3[j])*0.5;
			holdbuffer1[j]=holdbuffer2[j+(m_framesize/4)];
		}

		// First half: shift holdbuffer3 -> 2.
		if (j < ((int) m_framesize)/2){
			holdbuffer2[j]=holdbuffer3[j+(m_framesize/4)];
		}

		// First three quarters: stash the tail of the new frame for later.
		if (j < ((int) m_framesize)*0.75){
			holdbuffer3[j]=processedframe[j+(m_framesize/4)];
		}
	}
}

// Copies up to a quarter-frame of processed audio from the shared
// `outbuffer` into every output channel (the processing chain is mono, so
// all channels receive the same samples).  Requests larger than the ready
// segment are ignored.
void MultiRealTimeFilter::getOutput(float **output, size_t samples)
{
	if (samples > m_framesize/4)
		return;

	const int channelCount = getSourceChannelCount();

	for (size_t i = 0; i < samples; ++i) {
		const float sample = outbuffer[i];
		for (int ch = 0; ch < channelCount; ++ch)
			output[ch][i] = sample;
	}
}

// Reports how much input must be buffered before putInput() can run: one
// full analysis frame scaled by (pitch factor + 1), independent of how much
// output the caller actually needs.
size_t MultiRealTimeFilter::getRequiredInputSamples(size_t outputSamplesNeeded)
{
	const double scaled = m_framesize * (m_timeStretchFilter->getPitchFactor() + 1);
	return (size_t) floor(scaled);
}

// Reports how far the source read position should advance after a frame,
// and moves the global `currentposition` by the same amount.  With active
// time-stretching (and no transient hold) the hop is scaled by `hopfactor`;
// otherwise the plain analysis hop is used.
size_t MultiRealTimeFilter::getRequiredSkipSamples()
{
	if (m_timeStretchFilter->bypass() == false && m_transhold == 0)
	{
		const size_t stretchedHop = (size_t) floor(m_hop*hopfactor);
		currentposition += (int) floor(m_hop*hopfactor);
		return stretchedHop;
	}

	currentposition += m_hop;
	return m_hop;
}

// Slot invoked when either sub-filter toggles.  The aggregate is enabled
// whenever at least one sub-filter is enabled; the signal argument `b` is
// ignored because both sub-filters are queried directly.
void MultiRealTimeFilter::setFilterEnabled(bool b)
{
	const bool timeStretchOn = m_timeStretchFilter->isEnabled();
	const bool equalizerOn = m_equalizerFilter->isEnabled();
	filterEnabled = timeStretchOn || equalizerOn;
}

// int-argument overload of the slot above (for signals carrying an int).
// Behaves identically: the argument is ignored and the aggregate enabled
// state is recomputed from the two sub-filters.
void MultiRealTimeFilter::setFilterEnabled(int b)
{
	const bool timeStretchOn = m_timeStretchFilter->isEnabled();
	const bool equalizerOn = m_equalizerFilter->isEnabled();
	filterEnabled = timeStretchOn || equalizerOn;
}