/*
  This code accompanies the textbook:

  Digital Audio Effects: Theory, Implementation and Application
  Joshua D. Reiss and Andrew P. McPherson

  ---

  Robotisation: robot effect using phase vocoder;
  see also whisperisation code in processBlock() below.

  See textbook Chapter 8: The Phase Vocoder

  Code by Andrew McPherson, Brecht De Man and Joshua Reiss

  This code requires the fftw library version 3 to compile:
  http://fftw.org

  ---

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"


//==============================================================================
RobotisationAudioProcessor::RobotisationAudioProcessor() : inputBuffer_(2, 1), outputBuffer_(2, 1)
{
    // Set default values:
    fftSelectedSize_ = 512;
    hopSelectedSize_ = hopActualSize_ = 256;
    windowType_ = kWindowRectangular;

    fftInitialised_ = false;
    fftActualTransformSize_ = 0;
    inputBufferLength_ = 1;
    outputBufferLength_ = 1;
    inputBufferWritePosition_ = outputBufferWritePosition_ = outputBufferReadPosition_ = 0;
    samplesSinceLastFFT_ = 0;
    windowBuffer_ = 0;
    windowBufferLength_ = 0;
    preparedToPlay_ = false;
    fftScaleFactor_ = 0.0;

    lastUIWidth_ = 370;
    lastUIHeight_ = 120;
}

RobotisationAudioProcessor::~RobotisationAudioProcessor()
{
    // Release FFT resources if allocated. This should be handled by
    // releaseResources() but in the event it doesn't happen, this avoids
    // a leak. Harmless to call it twice.
    deinitFFT();
    deinitWindow();
}

//==============================================================================
const String RobotisationAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

int RobotisationAudioProcessor::getNumParameters()
{
    return kNumParameters;
}

float RobotisationAudioProcessor::getParameter (int index)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:    return (float)fftSelectedSize_;
        case kHopSizeParam:    return (float)hopSelectedSize_;
        case kWindowTypeParam: return (float)windowType_;
        default:               return 0.0f;
    }
}
void RobotisationAudioProcessor::setParameter (int index, float newValue)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:
            if((int)newValue != fftSelectedSize_)
            {
                fftSelectedSize_ = (int)newValue;
                if(preparedToPlay_)
                {
                    // Update settings if currently playing; otherwise wait until prepareToPlay() is called
                    initFFT(fftSelectedSize_);
                    initWindow(fftSelectedSize_, windowType_);
                }
            }
            break;
        case kHopSizeParam:
            hopSelectedSize_ = (int)newValue;
            if(preparedToPlay_)
                updateHopSize();
            break;
        case kWindowTypeParam:
            // Recalculate window if needed
            if((int)newValue != windowType_)
            {
                windowType_ = (int)newValue;
                if(preparedToPlay_)
                    initWindow(fftActualTransformSize_, (int)newValue);
            }
            break;
        default:
            break;
    }
}

const String RobotisationAudioProcessor::getParameterName (int index)
{
    switch (index)
    {
        case kFFTSizeParam:    return "FFT size";
        case kHopSizeParam:    return "hop size";
        case kWindowTypeParam: return "window type";
        default:               break;
    }

    return String::empty;
}

const String RobotisationAudioProcessor::getParameterText (int index)
{
    return String (getParameter (index), 2);
}

const String RobotisationAudioProcessor::getInputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

const String RobotisationAudioProcessor::getOutputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

bool RobotisationAudioProcessor::isInputChannelStereoPair (int index) const
{
    return true;
}

bool RobotisationAudioProcessor::isOutputChannelStereoPair (int index) const
{
    return true;
}

bool RobotisationAudioProcessor::silenceInProducesSilenceOut() const
{
#if JucePlugin_SilenceInProducesSilenceOut
    return true;
#else
    return false;
#endif
}

double RobotisationAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

bool RobotisationAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool RobotisationAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

int RobotisationAudioProcessor::getNumPrograms()
{
    return 0;
}

int RobotisationAudioProcessor::getCurrentProgram()
{
    return 0;
}

void RobotisationAudioProcessor::setCurrentProgram (int index)
{
}

const String RobotisationAudioProcessor::getProgramName (int index)
{
    return String::empty;
}

void RobotisationAudioProcessor::changeProgramName (int index, const String& newName)
{
}
//==============================================================================
void RobotisationAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..

    initFFT(fftSelectedSize_);
    initWindow(fftSelectedSize_, windowType_);
    preparedToPlay_ = true;
}

void RobotisationAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.

    deinitFFT();
    deinitWindow();
    preparedToPlay_ = false;
}
void RobotisationAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Helpful information about this block of samples:
    const int numInputChannels = getNumInputChannels();     // How many input channels for our effect?
    const int numOutputChannels = getNumOutputChannels();   // How many output channels for our effect?
    const int numSamples = buffer.getNumSamples();          // How many samples in the buffer for this block?

    int channel, inwritepos, sampsincefft;
    int outreadpos, outwritepos;

    // Grab the lock that prevents the FFT settings from changing
    fftSpinLock_.enter();

    // Check that we're initialised and ready to go. If not, set output to 0
    if(!fftInitialised_)
    {
        for (channel = 0; channel < numOutputChannels; ++channel)
        {
            buffer.clear (channel, 0, buffer.getNumSamples());
        }

        fftSpinLock_.exit();
        return;
    }

    // Go through each channel of audio that's passed in. Collect the samples in the input
    // buffer. When we've reached the next hop interval, calculate the FFT.
    for (channel = 0; channel < numInputChannels; ++channel)
    {
        // channelData is an array of length numSamples which contains the audio for one channel
        float* channelData = buffer.getSampleData(channel);

        // inputBufferData is the circular buffer for collecting input samples for the FFT
        float* inputBufferData = inputBuffer_.getSampleData(jmin (channel, inputBuffer_.getNumChannels() - 1));
        float* outputBufferData = outputBuffer_.getSampleData(jmin (channel, outputBuffer_.getNumChannels() - 1));

        // State variables need to be temporarily cached for each channel. We don't want the
        // operations on one channel to affect the identical behaviour of the next channel.
        inwritepos = inputBufferWritePosition_;
        outwritepos = outputBufferWritePosition_;
        outreadpos = outputBufferReadPosition_;
        sampsincefft = samplesSinceLastFFT_;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];

            // Store the next buffered sample in the output. Do this first, before anything
            // changes the output buffer: we will have at least one FFT frame of data
            // stored and ready to go. Set the result to 0 when finished, in preparation for
            // the next overlap/add procedure.
            channelData[i] = outputBufferData[outreadpos];
            outputBufferData[outreadpos] = 0.0;
            if(++outreadpos >= outputBufferLength_)
                outreadpos = 0;

            // Store the current sample in the input buffer, incrementing the write pointer. Also
            // increment how many samples we've stored since the last transform. If it reaches the
            // hop size, perform an FFT and any frequency-domain processing.
            inputBufferData[inwritepos] = in;
            if (++inwritepos >= inputBufferLength_)
                inwritepos = 0;
            if (++sampsincefft >= hopActualSize_)
            {
                sampsincefft = 0;

                // Find the index of the starting sample in the buffer. When the buffer length
                // is equal to the transform size, this will be the current write position, but
                // this code is more general for larger buffers.
                int inputBufferStartPosition = (inwritepos + inputBufferLength_
                                                - fftActualTransformSize_) % inputBufferLength_;

                // Window the buffer and copy it into the FFT input
                int inputBufferIndex = inputBufferStartPosition;
                for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
                {
                    // Set real part to windowed signal; imaginary part to 0.
                    fftTimeDomain_[fftBufferIndex][1] = 0.0;
                    if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
                        fftTimeDomain_[fftBufferIndex][0] = 0.0;
                    else
                        fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
                                                            * inputBufferData[inputBufferIndex];
                    inputBufferIndex++;
                    if(inputBufferIndex >= inputBufferLength_)
                        inputBufferIndex = 0;
                }

                // Perform the FFT on the windowed data, going into the frequency domain.
                // Result will be in fftFrequencyDomain_
                fftw_execute(fftForwardPlan_);

                // ********** PHASE VOCODER PROCESSING GOES HERE **************
                // This is the place where frequency-domain calculations are made
                // on the transformed signal. Put the result back into fftFrequencyDomain_
                // before transforming back.
                // ************************************************************
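
                // Two example processors follow: the whisperiser (commented out
                // below) keeps each bin's magnitude but randomises its phase,
                // while the robotisation loop after it keeps each bin's magnitude
                // and sets every phase to zero.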

                // Whisperiser
                /*for(int bin = 0; bin <= fftActualTransformSize_ / 2; bin++)
                {
                    float amplitude = sqrt(fftFrequencyDomain_[bin][0]*fftFrequencyDomain_[bin][0] +
                                           fftFrequencyDomain_[bin][1]*fftFrequencyDomain_[bin][1]);

                    // This is what we would use to exactly reconstruct the signal:
                    // float phase = atan2(fftFrequencyDomain_[bin][1], fftFrequencyDomain_[bin][0]);

                    // But instead, this is what we use to scramble the phase:
                    float phase = 2.0 * M_PI * (float)rand() / (float)RAND_MAX;

                    // Resynthesise the bin with the same amplitude as before, but
                    // with the random phase.
                    fftFrequencyDomain_[bin][0] = amplitude * cos(phase);
                    fftFrequencyDomain_[bin][1] = amplitude * sin(phase);

                    // FFTs of real signals are conjugate-symmetric. We need to maintain that symmetry
                    // to produce a real output, even as we randomise the phase.
                    if(bin > 0 && bin < fftActualTransformSize_ / 2) {
                        fftFrequencyDomain_[fftActualTransformSize_ - bin][0] = amplitude * cos(phase);
                        fftFrequencyDomain_[fftActualTransformSize_ - bin][1] = -amplitude * sin(phase);
                    }
                }*/
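
                // To hear the whisperiser instead of the robot effect, uncomment
                // the block above and comment out the robotisation loop below.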

                for(int bin = 0; bin < fftActualTransformSize_; bin++)
                {
                    float amplitude = sqrt(fftFrequencyDomain_[bin][0]*fftFrequencyDomain_[bin][0] +
                                           fftFrequencyDomain_[bin][1]*fftFrequencyDomain_[bin][1]);

                    // Set the phase of each bin to 0. phase = 0 means the signal is entirely
                    // positive-real, but the overall amplitude is the same as before.
                    fftFrequencyDomain_[bin][0] = amplitude;
                    fftFrequencyDomain_[bin][1] = 0.0;
                }
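
                // Why this sounds robotic: zeroing every phase turns each frame
                // into a zero-phase pulse, and overlap-adding those pulses every
                // hopActualSize_ samples imposes a fixed periodicity of roughly
                // sampleRate / hopActualSize_ Hz (e.g. 44100/256, about 172 Hz),
                // regardless of the pitch of the input.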

                // Perform the inverse FFT to get back to the time domain. Result will be
                // in fftTimeDomain_. If we've done it right (kept the frequency domain
                // symmetric), the time domain result should be strictly real, allowing us
                // to ignore the imaginary part.
                fftw_execute(fftBackwardPlan_);

                // Add the result to the output buffer, starting at the current write position
                // (Output buffer will have been zeroed after reading the last time around)
                // Output needs to be scaled by the transform size to get back to original amplitude:
                // this is a property of how fftw is implemented. Scaling will also need to be adjusted
                // based on hop size to get the same output level (a smaller hop size produces more
                // overlap and hence a higher signal level)
                int outputBufferIndex = outwritepos;
                for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
                {
                    // Reapply the window, since the phase changes will result in discontinuities
                    // at the edges of the window
                    if(fftBufferIndex < windowBufferLength_)
                        outputBufferData[outputBufferIndex] += windowBuffer_[fftBufferIndex] *
                                                               fftTimeDomain_[fftBufferIndex][0] * fftScaleFactor_;
                    if(++outputBufferIndex >= outputBufferLength_)
                        outputBufferIndex = 0;
                }

                // Advance the write position within the buffer by the hop size
                outwritepos = (outwritepos + hopActualSize_) % outputBufferLength_;
            }
        }
    }

    // Having made a local copy of the state variables for each channel, now transfer the result
    // back to the main state variables so they will be preserved for the next call of processBlock()
    inputBufferWritePosition_ = inwritepos;
    outputBufferWritePosition_ = outwritepos;
    outputBufferReadPosition_ = outreadpos;
    samplesSinceLastFFT_ = sampsincefft;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data (because these aren't
    // guaranteed to be empty -- they may contain garbage).
    for (int i = numInputChannels; i < numOutputChannels; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    fftSpinLock_.exit();
}

//==============================================================================
bool RobotisationAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* RobotisationAudioProcessor::createEditor()
{
    return new RobotisationAudioProcessorEditor (this);
}

//==============================================================================
void RobotisationAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml("C4DMPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute("uiWidth", lastUIWidth_);
    xml.setAttribute("uiHeight", lastUIHeight_);
    xml.setAttribute("fftSize", fftSelectedSize_);
    xml.setAttribute("hopSize", hopSelectedSize_);
    xml.setAttribute("windowType", windowType_);

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary(xml, destData);
}

void RobotisationAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // You should use this method to restore your parameters from this memory block,
    // whose contents will have been created by the getStateInformation() call.

    // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
    ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));

    if(xmlState != 0)
    {
        // make sure that it's actually our type of XML object..
        if(xmlState->hasTagName("C4DMPLUGINSETTINGS"))
        {
            // ok, now pull out our parameters..
            lastUIWidth_ = xmlState->getIntAttribute("uiWidth", lastUIWidth_);
            lastUIHeight_ = xmlState->getIntAttribute("uiHeight", lastUIHeight_);

            fftSelectedSize_ = (int)xmlState->getDoubleAttribute("fftSize", fftSelectedSize_);
            hopSelectedSize_ = (int)xmlState->getDoubleAttribute("hopSize", hopSelectedSize_);
            windowType_ = (int)xmlState->getDoubleAttribute("windowType", windowType_);

            if(preparedToPlay_)
            {
                // Update settings if currently playing; otherwise wait until prepareToPlay() is called
                initFFT(fftSelectedSize_);
                initWindow(fftSelectedSize_, windowType_);
            }
        }
    }
}

//==============================================================================
// Initialise the FFT data structures for a given length transform
void RobotisationAudioProcessor::initFFT(int length)
{
    if(fftInitialised_)
        deinitFFT();

    // Save the current length so we know how big our results are later
    fftActualTransformSize_ = length;

    // Here we allocate the complex-number buffers for the FFT. This uses
    // a convenient wrapper on the more general fftw_malloc()
    fftTimeDomain_ = fftw_alloc_complex(length);
    fftFrequencyDomain_ = fftw_alloc_complex(length);
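
    // (fftw_alloc_complex(n) is equivalent to
    // (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n). Each fftw_complex
    // is a double[2] holding the real part in [0] and the imaginary part in [1],
    // which is how processBlock() indexes these buffers.)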

    // FFTW_ESTIMATE doesn't necessarily produce the fastest executing code (FFTW_MEASURE
    // will get closer) but it carries a minimal startup cost. FFTW_MEASURE might stall for
    // several seconds, which would be annoying in an audio plug-in context.
    fftForwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftTimeDomain_,
                                       fftFrequencyDomain_, FFTW_FORWARD, FFTW_ESTIMATE);
    fftBackwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftFrequencyDomain_,
                                        fftTimeDomain_, FFTW_BACKWARD, FFTW_ESTIMATE);

    // Allocate the buffer that the samples will be collected in
    inputBufferLength_ = fftActualTransformSize_;
    inputBuffer_.setSize(2, inputBufferLength_);
    inputBuffer_.clear();
    inputBufferWritePosition_ = 0;
    samplesSinceLastFFT_ = 0;

    // Allocate the output buffer to be twice the size of the FFT.
    // This will be enough for all hop size cases.
    outputBufferLength_ = 2*fftActualTransformSize_;
    outputBuffer_.setSize(2, outputBufferLength_);
    outputBuffer_.clear();
    outputBufferReadPosition_ = 0;

    updateHopSize();

    fftInitialised_ = true;
}

// Free the FFT data structures
void RobotisationAudioProcessor::deinitFFT()
{
    if(!fftInitialised_)
        return;

    // Prevent this variable from changing while an audio callback is running.
    // Once it has changed, the next audio callback will find that it's not
    // initialised and will return silence instead of attempting to work with the
    // (invalid) FFT structures. This produces an audible glitch but no crash,
    // and is the simplest way to handle parameter changes in this example code.
    fftSpinLock_.enter();
    fftInitialised_ = false;
    fftSpinLock_.exit();

    fftw_destroy_plan(fftForwardPlan_);
    fftw_destroy_plan(fftBackwardPlan_);
    fftw_free(fftTimeDomain_);
    fftw_free(fftFrequencyDomain_);

    // Leave the input buffer in memory until the plugin is released
}

//==============================================================================
// Create a new window of a given length and type
void RobotisationAudioProcessor::initWindow(int length, int windowType)
{
    if(windowBuffer_ != 0)
        deinitWindow();
    if(length == 0) // Sanity check
        return;

    // Allocate memory for the window
    windowBuffer_ = (double *)malloc(length * sizeof(double));

    // Write the length as a double here to simplify the code below (otherwise
    // typecasts would be needed in each formula)
    double windowLength = length;

    // Set values for the window, depending on its type
    for(int i = 0; i < length; i++)
    {
        // Window functions are typically defined to be symmetrical. This will cause a
        // problem in the overlap-add process: the windows instead need to be periodic
        // when arranged end-to-end. As a result we calculate the window one sample
        // larger than usual, and drop the last sample. (This works as long as N is even.)
        // See Julius Smith, "Spectral Audio Signal Processing" for details.
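        // For example, a symmetric Hann window would divide by (N - 1); the
        // periodic Hann below divides by N, so that copies of the window spaced
        // N/2 samples apart sum to a constant.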
        switch(windowType)
        {
            case kWindowBartlett:
                windowBuffer_[i] = (2.0/(windowLength + 2.0))*
                    (0.5*(windowLength + 2.0) - fabs((double)i - 0.5*windowLength));
                break;
            case kWindowHann:
                windowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
                break;
            case kWindowHamming:
                windowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
                break;
            case kWindowRectangular:
            default:
                windowBuffer_[i] = 1.0;
                break;
        }
    }

    windowBufferLength_ = length;
    updateScaleFactor();
}

// Free the window buffer
void RobotisationAudioProcessor::deinitWindow()
{
    if(windowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    windowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(windowBuffer_);
    windowBuffer_ = 0;
}

// Update the actual hop size from the selected hop size parameter,
// which in this plugin is specified directly in samples.
void RobotisationAudioProcessor::updateHopSize()
{
    hopActualSize_ = hopSelectedSize_;

    // Update the factor by which samples are scaled to preserve unity gain
    updateScaleFactor();

    // The write pointer leads the read pointer, allowing the FFT buffers to accumulate
    // and be processed. Total latency is the sum of the FFT size and the hop size.
    outputBufferWritePosition_ = hopActualSize_ + fftActualTransformSize_;
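
    // For example, with fftActualTransformSize_ = 512 and hopActualSize_ = 256,
    // the write position starts 768 samples ahead of the read position: about
    // 17 ms of latency at a 44.1 kHz sample rate.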
}

// Update the factor by which each output sample is scaled. This needs to be
// updated every time the FFT size, hop size or window type changes.
void RobotisationAudioProcessor::updateScaleFactor()
{
    // The gain needs to be normalised by the sum of the window, which implicitly
    // accounts for the length of the transform and the window type. From there
    // we also update based on hop size: a smaller hop means more overlap, so the
    // overall gain should be reduced.
    double windowSum = 0.0;

    for(int i = 0; i < windowBufferLength_; i++)
    {
        windowSum += windowBuffer_[i];
    }

    if(windowSum == 0.0)
        fftScaleFactor_ = 0.0; // Catch invalid cases and mute output
    else
    {
        fftScaleFactor_ = ((double)hopSelectedSize_/(double)fftActualTransformSize_)/windowSum;
    }
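
    // Illustrative numbers: a 512-point Hann window sums to exactly 256, so with
    // a hop of 256 the factor is (256/512)/256 = 1/512, cancelling the factor of
    // N = 512 that fftw's unnormalised inverse transform introduces.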
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new RobotisationAudioProcessor();
}