/*
  This code accompanies the textbook:

  Digital Audio Effects: Theory, Implementation and Application
  Joshua D. Reiss and Andrew P. McPherson

  ---

  PVOC Pitch Shift: pitch shifter using phase vocoder
  See textbook Chapter 8: The Phase Vocoder

  Code by Andrew McPherson, Brecht De Man and Joshua Reiss
  Based on a project by Xinyuan Lai

  This code requires the fftw library version 3 to compile:
  http://fftw.org

  ---

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"


//==============================================================================
PVOCPitchShiftAudioProcessor::PVOCPitchShiftAudioProcessor() : inputBuffer_(2, 1), outputBuffer_(2, 1)
{
    // Set default values:
    fftSelectedSize_ = 1024;
    hopSelectedSize_ = kHopSize1_8Window;
    windowType_ = kWindowHann;

    // (⊙_⊙)
    pitchSelectedShift_ = kShift0;
    pitchActualShift_ = 1.0;
    pitchActualShiftRec_ = 1.0;
    actualRatio_ = 1.0;
    synthesisWindowBufferLength_ = 1024;
    for (int i = 0; i < 2048; i++)
    {
        omega_[i] = 0.25*M_PI*i;  // 0.25 = 2*hopsize/windowlength for the default 1/8-window hop
    }

    fftInitialised_ = false;
    fftActualTransformSize_ = 0;
    inputBufferLength_ = 1;
    outputBufferLength_ = 1;
    inputBufferWritePosition_ = outputBufferWritePosition_ = outputBufferReadPosition_ = 0;
    samplesSinceLastFFT_ = 0;
    windowBuffer_ = 0;
    synthesisWindowBuffer_ = 0;
    windowBufferLength_ = 0;
    synthesisWindowBufferLength_ = 0;
    preparedToPlay_ = false;
    fftScaleFactor_ = 0.0;

    lastUIWidth_ = 370;
    lastUIHeight_ = 120;
}

PVOCPitchShiftAudioProcessor::~PVOCPitchShiftAudioProcessor()
{
    // Release FFT resources if allocated. This should be handled by
    // releaseResources() but in the event it doesn't happen, this avoids
    // a leak. Harmless to call it twice.
    deinitFFT();
    deinitWindow();
    deinitSynthesisWindow();
}

//==============================================================================
const String PVOCPitchShiftAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

int PVOCPitchShiftAudioProcessor::getNumParameters()
{
    return kNumParameters;
}

float PVOCPitchShiftAudioProcessor::getParameter (int index)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:    return (float)fftSelectedSize_;
        case kHopSizeParam:    return (float)hopSelectedSize_;
        case kWindowTypeParam: return (float)windowType_;
        case kPitchShiftParam: return (float)pitchSelectedShift_; // (⊙_⊙)
        default:               return 0.0f;
    }
}

void PVOCPitchShiftAudioProcessor::setParameter (int index, float newValue)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:
            if((int)newValue != fftSelectedSize_)
            {
                fftSelectedSize_ = (int)newValue;
                // (⊙_⊙)
                synthesisWindowBufferLength_ = floor(fftSelectedSize_*pitchActualShiftRec_);

                if(preparedToPlay_)
                {
                    // Update settings if currently playing, else wait until prepareToPlay() called
                    initFFT(fftSelectedSize_);
                    initWindow(fftSelectedSize_, windowType_);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        case kHopSizeParam:
            hopSelectedSize_ = (int)newValue;
            if(preparedToPlay_)
            {
                updateHopSize();
                initWindow(fftSelectedSize_, windowType_);
                initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
            }
            break;
        case kWindowTypeParam:
            // Recalculate window if needed
            if((int)newValue != windowType_)
            {
                windowType_ = (int)newValue;
                if(preparedToPlay_)
                {
                    initWindow(fftActualTransformSize_, (int)newValue);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        case kPitchShiftParam:
            // (⊙_⊙)
            if((int)newValue != pitchSelectedShift_)
            {
                pitchSelectedShift_ = (int)newValue;
                if(preparedToPlay_)
                {
                    updatePitchShift();
                    initWindow(fftSelectedSize_, windowType_);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        default:
            break;
    }

    // (⊙_⊙) Reset the arrays containing the phase information
    for (int i = 0; i < 2048; i++)
    {
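        // omega_[i] is the expected per-hop phase advance of bin i, 2*pi*i*H/N for hop size H
        // and FFT size N (the assignment just below). It depends on the current FFT and hop
        // settings, which is why it is recomputed here, and why the per-channel phase state
        // (phi0_, dphi_, psi_) is zeroed whenever a parameter changes.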
        omega_[i] = 2*M_PI*i*hopActualSize_/fftActualTransformSize_;
        for (int j = 0; j < 2; j++)
        {
            phi0_[i][j] = 0;
            dphi_[i][j] = 0;
            psi_[i][j] = 0;
        }
    }
}

const String PVOCPitchShiftAudioProcessor::getParameterName (int index)
{
    switch (index)
    {
        case kFFTSizeParam:    return "FFT size";
        case kHopSizeParam:    return "hop size";
        case kWindowTypeParam: return "window type";
        case kPitchShiftParam: return "pitch shift"; // (⊙_⊙)
        default:               break;
    }

    return String::empty;
}

const String PVOCPitchShiftAudioProcessor::getParameterText (int index)
{
    return String (getParameter (index), 2);
}

const String PVOCPitchShiftAudioProcessor::getInputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

const String PVOCPitchShiftAudioProcessor::getOutputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

bool PVOCPitchShiftAudioProcessor::isInputChannelStereoPair (int index) const
{
    return true;
}

bool PVOCPitchShiftAudioProcessor::isOutputChannelStereoPair (int index) const
{
    return true;
}

bool PVOCPitchShiftAudioProcessor::silenceInProducesSilenceOut() const
{
#if JucePlugin_SilenceInProducesSilenceOut
    return true;
#else
    return false;
#endif
}

double PVOCPitchShiftAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

bool PVOCPitchShiftAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PVOCPitchShiftAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

int PVOCPitchShiftAudioProcessor::getNumPrograms()
{
    return 0;
}

int PVOCPitchShiftAudioProcessor::getCurrentProgram()
{
    return 0;
}

void PVOCPitchShiftAudioProcessor::setCurrentProgram (int index)
{
}

const String PVOCPitchShiftAudioProcessor::getProgramName (int index)
{
    return String::empty;
}

void PVOCPitchShiftAudioProcessor::changeProgramName (int index, const String& newName)
{
}

//==============================================================================
void PVOCPitchShiftAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
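    // Note on the third call below: floor(fftSelectedSize_*pitchActualShiftRec_) is the FFT size
    // divided by the pitch ratio, i.e. the length of a grain after it has been resampled for the
    // pitch shift, so the synthesis window is built to match the grains it tapers in processBlock().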

    initFFT(fftSelectedSize_);
    initWindow(fftSelectedSize_, windowType_);
    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
    preparedToPlay_ = true;
}

void PVOCPitchShiftAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.

    deinitFFT();
    deinitWindow();
    deinitSynthesisWindow();
    preparedToPlay_ = false;
}

void PVOCPitchShiftAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Helpful information about this block of samples:
    const int numInputChannels = getNumInputChannels();     // How many input channels for our effect?
    const int numOutputChannels = getNumOutputChannels();   // How many output channels for our effect?
    const int numSamples = buffer.getNumSamples();          // How many samples in the buffer for this block?

    int channel, inwritepos, sampsincefft;
    int outreadpos, outwritepos;

    // Grab the lock that prevents the FFT settings from changing
    fftSpinLock_.enter();

    // Check that we're initialised and ready to go. If not, set output to 0
    if(!fftInitialised_)
    {
        for (channel = 0; channel < numOutputChannels; ++channel)
        {
            buffer.clear (channel, 0, buffer.getNumSamples());
        }

        fftSpinLock_.exit();
        return;
    }

    // Go through each channel of audio that's passed in. Collect the samples in the input
    // buffer. When we've reached the next hop interval, calculate the FFT.
    for (channel = 0; channel < numInputChannels; ++channel)
    {
        // (⊙_⊙)
        // Earlier, commented-out local versions of the phase-vocoder state, superseded by the
        // member arrays so the phase carries over between blocks:
        //double amplitude[fftActualTransformSize_];
//        double phi[fftActualTransformSize_];
//        double phi0[fftActualTransformSize_];
//        double dphi[fftActualTransformSize_];
//        double psi[fftActualTransformSize_];
//        double omega[fftActualTransformSize_];
//        for (int i = 0; i < fftActualTransformSize_; i++)
//            omega[i] = 2*M_PI*i*hopActualSize_/fftActualTransformSize_;

        // NOTE: the lines from here to the read-pointer wrap below were lost from this listing;
        // they are restored following the buffering pattern the rest of this file assumes.

        // channelData is an array of length numSamples which contains the audio for one channel
        float* channelData = buffer.getSampleData(channel);

        // inputBufferData and outputBufferData are the circular buffers used for collecting
        // samples for the FFT and for overlap-adding the processed grains
        float* inputBufferData = inputBuffer_.getSampleData(jmin(channel, inputBuffer_.getNumChannels() - 1));
        float* outputBufferData = outputBuffer_.getSampleData(jmin(channel, outputBuffer_.getNumChannels() - 1));

        // Cache the buffer positions locally so that every channel starts from the same state
        inwritepos = inputBufferWritePosition_;
        outwritepos = outputBufferWritePosition_;
        outreadpos = outputBufferReadPosition_;
        sampsincefft = samplesSinceLastFFT_;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];

            // Emit the sample currently at the output read pointer, then clear it so the
            // location is ready for the next overlap-add pass
            channelData[i] = outputBufferData[outreadpos];
            outputBufferData[outreadpos] = 0.0;
            if (++outreadpos >= outputBufferLength_)
                outreadpos = 0;

            // Store the current sample in the input buffer, incrementing the write pointer. Also
            // increment how many samples we've stored since the last transform. If it reaches the
            // hop size, perform an FFT and any frequency-domain processing.
            inputBufferData[inwritepos] = in;
            if (++inwritepos >= inputBufferLength_)
                inwritepos = 0;
            if (++sampsincefft >= hopActualSize_)
            {
                sampsincefft = 0;

                // Find the index of the starting sample in the buffer. When the buffer length
                // is equal to the transform size, this will be the current write position but
                // this code is more general for larger buffers.
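                // As a concrete case: when inputBufferLength_ equals fftActualTransformSize_,
                // (inwritepos + inputBufferLength_ - fftActualTransformSize_) % inputBufferLength_
                // reduces to inwritepos itself, the oldest sample in the circular buffer.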
                int inputBufferStartPosition = (inwritepos + inputBufferLength_
                                                - fftActualTransformSize_) % inputBufferLength_;

                // Window the buffer and copy it into the FFT input
                int inputBufferIndex = inputBufferStartPosition;
                for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
                {
                    // Set real part to windowed signal; imaginary part to 0.
                    fftTimeDomain_[fftBufferIndex][1] = 0.0;
                    if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
                        fftTimeDomain_[fftBufferIndex][0] = 0.0;
                    else
                        fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
                                                            * inputBufferData[inputBufferIndex];
                    inputBufferIndex++;
                    if(inputBufferIndex >= inputBufferLength_)
                        inputBufferIndex = 0;
                }

                // Perform the FFT on the windowed data, going into the frequency domain.
                // Result will be in fftFrequencyDomain_
                fftw_execute(fftForwardPlan_);

                // ********** PHASE VOCODER PROCESSING GOES HERE **************
                // This is the place where frequency-domain calculations are made
                // on the transformed signal. Put the result back into fftFrequencyDomain_
                // before transforming back.

                // (⊙_⊙)
                // NOTE: the original text of this section was lost from the listing. The code
                // below is a sketch of the standard phase-vocoder pitch-shift steps it performed,
                // reconstructed around the member state (omega_, phi0_, dphi_, psi_, actualRatio_)
                // and the grain3 overlap-add that survives further down: per-bin phase propagation,
                // inverse FFT, then resampling of the grain by the pitch ratio.

                for (int i = 0; i < fftActualTransformSize_; i++)
                {
                    // Convert this bin to magnitude and phase
                    double amplitude = sqrt(fftFrequencyDomain_[i][0]*fftFrequencyDomain_[i][0]
                                            + fftFrequencyDomain_[i][1]*fftFrequencyDomain_[i][1]);
                    double phi = atan2(fftFrequencyDomain_[i][1], fftFrequencyDomain_[i][0]);

                    // Per-hop phase advance: the expected advance omega_[i] plus the wrapped deviation
                    dphi_[i][channel] = omega_[i] + princArg(phi - phi0_[i][channel] - omega_[i]);
                    phi0_[i][channel] = phi;

                    // Accumulate the synthesis phase, scaled by the (quantised) pitch ratio
                    psi_[i][channel] = princArg(psi_[i][channel] + dphi_[i][channel]*actualRatio_);

                    // Back to rectangular coordinates with the modified phase
                    fftFrequencyDomain_[i][0] = amplitude*cos(psi_[i][channel]);
                    fftFrequencyDomain_[i][1] = amplitude*sin(psi_[i][channel]);
                }

                // Inverse transform: the processed grain appears in fftTimeDomain_
                fftw_execute(fftBackwardPlan_);

                // Resample the grain from fftActualTransformSize_ samples to
                // synthesisWindowBufferLength_ samples by linear interpolation. Overlap-adding the
                // resampled grains at the original hop size shifts the pitch by the selected ratio.
                double grain3[4096];  // covers the longest grain at the 2048-point maximum FFT size
                for (int j = 0; j < synthesisWindowBufferLength_; j++)
                {
                    double position = (double)j * (double)fftActualTransformSize_
                                      / (double)synthesisWindowBufferLength_;
                    int index = (int)floor(position);
                    double fraction = position - (double)index;
                    double sample0 = fftTimeDomain_[index][0];
                    double sample1 = (index + 1 < fftActualTransformSize_) ? fftTimeDomain_[index + 1][0] : 0.0;
                    grain3[j] = (1.0 - fraction)*sample0 + fraction*sample1;
                }

                // Overlap-add the windowed grain into the output buffer, starting at the
                // current write position
                int outputBufferIndex = outwritepos;
                for (int fftBufferIndex = 0; fftBufferIndex < synthesisWindowBufferLength_; fftBufferIndex++)
                {
                    if (fftBufferIndex >= synthesisWindowBufferLength_)
                        outputBufferData[outputBufferIndex] += 0;
                    else
                        outputBufferData[outputBufferIndex] += grain3[fftBufferIndex] * fftScaleFactor_
                                                               * synthesisWindowBuffer_[fftBufferIndex];

                    if (++outputBufferIndex >= outputBufferLength_)
                        outputBufferIndex = 0;
                }

                // Advance the write position within the buffer by the hop size
                outwritepos = (outwritepos + hopActualSize_) % outputBufferLength_;
            }
        }
    }

    // Having made a local copy of the state variables for each channel, now transfer the result
    // back to the main state variables so they will be preserved for the next call of processBlock()
    inputBufferWritePosition_ = inwritepos;
    outputBufferWritePosition_ = outwritepos;
    outputBufferReadPosition_ = outreadpos;
    samplesSinceLastFFT_ = sampsincefft;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data (because these aren't
    // guaranteed to be empty - they may contain garbage).
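    // e.g. with a mono-input, stereo-output layout the loop above only wrote channel 0,
    // so channel 1 is cleared here rather than left with stale data.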
    for (int i = numInputChannels; i < numOutputChannels; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    fftSpinLock_.exit();
}

//==============================================================================
bool PVOCPitchShiftAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PVOCPitchShiftAudioProcessor::createEditor()
{
    return new PVOCPitchShiftAudioProcessorEditor (this);
}

//==============================================================================
void PVOCPitchShiftAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml("C4DMPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute("uiWidth", lastUIWidth_);
    xml.setAttribute("uiHeight", lastUIHeight_);
    xml.setAttribute("fftSize", fftSelectedSize_);
    xml.setAttribute("hopSize", hopSelectedSize_);
    xml.setAttribute("windowType", windowType_);
    xml.setAttribute("pitchShift", pitchSelectedShift_); // (⊙_⊙)

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary(xml, destData);
}

void PVOCPitchShiftAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // You should use this method to restore your parameters from this memory block,
    // whose contents will have been created by the getStateInformation() call.

    // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
    ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));

    if(xmlState != 0)
    {
        // make sure that it's actually our type of XML object..
        if(xmlState->hasTagName("C4DMPLUGINSETTINGS"))
        {
            // ok, now pull out our parameters..
            lastUIWidth_  = xmlState->getIntAttribute("uiWidth", lastUIWidth_);
            lastUIHeight_ = xmlState->getIntAttribute("uiHeight", lastUIHeight_);

            fftSelectedSize_ = (int)xmlState->getDoubleAttribute("fftSize", fftSelectedSize_);
            hopSelectedSize_ = (int)xmlState->getDoubleAttribute("hopSize", hopSelectedSize_);
            windowType_      = (int)xmlState->getDoubleAttribute("windowType", windowType_);
            // (⊙_⊙)
            pitchSelectedShift_ = (int)xmlState->getDoubleAttribute("pitchShift", pitchSelectedShift_);

            if(preparedToPlay_)
            {
                // Update settings if currently playing, else wait until prepareToPlay() called
                initFFT(fftSelectedSize_);
                initWindow(fftSelectedSize_, windowType_);
                initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
            }
        }
    }
}

//==============================================================================
// Initialise the FFT data structures for a given length transform
void PVOCPitchShiftAudioProcessor::initFFT(int length)
{
    if(fftInitialised_)
        deinitFFT();

    // Save the current length so we know how big our results are later
    fftActualTransformSize_ = length;

    // Here we allocate the complex-number buffers for the FFT. This uses
    // a convenient wrapper on the more general fftw_malloc()
    fftTimeDomain_ = fftw_alloc_complex(length);
    fftFrequencyDomain_ = fftw_alloc_complex(length);

    // FFTW_ESTIMATE doesn't necessarily produce the fastest executing code (FFTW_MEASURE
    // will get closer) but it carries a minimum startup cost. FFTW_MEASURE might stall for
    // several seconds which would be annoying in an audio plug-in context.
    fftForwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftTimeDomain_,
                                       fftFrequencyDomain_, FFTW_FORWARD, FFTW_ESTIMATE);
    fftBackwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftFrequencyDomain_,
                                        fftTimeDomain_, FFTW_BACKWARD, FFTW_ESTIMATE);

    // Allocate the buffer that the samples will be collected in
    inputBufferLength_ = fftActualTransformSize_;
    inputBuffer_.setSize(2, inputBufferLength_);
    inputBuffer_.clear();
    inputBufferWritePosition_ = 0;
    samplesSinceLastFFT_ = 0;

    // Allocate the output buffer to be twice the size of the FFT.
    // This will be enough for all hop size cases.
    outputBufferLength_ = 2*fftActualTransformSize_;
    outputBuffer_.setSize(2, outputBufferLength_);
    outputBuffer_.clear();
    outputBufferReadPosition_ = 0;

    updateHopSize();

    // (⊙_⊙)
    updatePitchShift();

    fftInitialised_ = true;
}

// Free the FFT data structures
void PVOCPitchShiftAudioProcessor::deinitFFT()
{
    if(!fftInitialised_)
        return;

    // Prevent this variable from changing while an audio callback is running.
    // Once it has changed, the next audio callback will find that it's not
    // initialised and will return silence instead of attempting to work with the
    // (invalid) FFT structures. This produces an audible glitch but no crash,
    // and is the simplest way to handle parameter changes in this example code.
    fftSpinLock_.enter();
    fftInitialised_ = false;
    fftSpinLock_.exit();

    fftw_destroy_plan(fftForwardPlan_);
    fftw_destroy_plan(fftBackwardPlan_);
    fftw_free(fftTimeDomain_);
    fftw_free(fftFrequencyDomain_);

    // Leave the input buffer in memory until the plugin is released
}

//==============================================================================
// Create a new window of a given length and type
void PVOCPitchShiftAudioProcessor::initWindow(int length, int windowType)
{
    if(windowBuffer_ != 0)
        deinitWindow();
    if(length == 0) // Sanity check
        return;

    // Allocate memory for the window
    windowBuffer_ = (double *)malloc(length * sizeof(double));

    // Write the length as a double here to simplify the code below (otherwise
    // typecasts would be wise)
    double windowLength = length;

    // Set values for the window, depending on its type
    for(int i = 0; i < length; i++)
    {
        // Window functions are typically defined to be symmetrical. This will cause a
        // problem in the overlap-add process: the windows instead need to be periodic
        // when arranged end-to-end. As a result we calculate the window of one sample
        // larger than usual, and drop the last sample. (This works as long as N is even.)
        // See Julius Smith, "Spectral Audio Signal Processing" for details.
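        // For example, the periodic Hann used below, w[n] = 0.5*(1 - cos(2*pi*n/N)) for n = 0..N-1,
        // is the symmetric (N+1)-point Hann window with its final sample dropped.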
        switch(windowType)
        {
            case kWindowBartlett:
                windowBuffer_[i] = (2.0/(windowLength + 2.0))*
                    (0.5*(windowLength + 2.0) - fabs((double)i - 0.5*windowLength));
                break;
            case kWindowHann:
                windowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
                break;
            case kWindowHamming:
                windowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
                break;
            case kWindowRectangular:
            default:
                windowBuffer_[i] = 1.0;
                break;
        }
    }

    windowBufferLength_ = length;
    updateScaleFactor();
}

//==============================================================================
// Create a new synthesis window of a given length and type
void PVOCPitchShiftAudioProcessor::initSynthesisWindow(int length, int windowType)
{
    if(synthesisWindowBuffer_ != 0)
        deinitSynthesisWindow();
    if(length == 0) // Sanity check
        return;

    // Allocate memory for the window
    synthesisWindowBuffer_ = (double *)malloc(length * sizeof(double));

    // Write the length as a double here to simplify the code below (otherwise
    // typecasts would be wise)
    double windowLength = length;

    // Set values for the window, depending on its type
    for(int i = 0; i < length; i++)
    {
        // Window functions are typically defined to be symmetrical. This will cause a
        // problem in the overlap-add process: the windows instead need to be periodic
        // when arranged end-to-end. As a result we calculate the window of one sample
        // larger than usual, and drop the last sample. (This works as long as N is even.)
        // See Julius Smith, "Spectral Audio Signal Processing" for details.
        switch(windowType)
        {
            case kWindowBartlett:
                synthesisWindowBuffer_[i] = (2.0/(windowLength + 2.0))*
                    (0.5*(windowLength + 2.0) - fabs((double)i - 0.5*windowLength));
                break;
            case kWindowHann:
                synthesisWindowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
                break;
            case kWindowHamming:
                synthesisWindowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
                break;
            case kWindowRectangular:
            default:
                synthesisWindowBuffer_[i] = 1.0;
                break;
        }
    }

    synthesisWindowBufferLength_ = length;
    updateScaleFactor();
}

// Free the window buffer
void PVOCPitchShiftAudioProcessor::deinitWindow()
{
    if(windowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    windowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(windowBuffer_);
    windowBuffer_ = 0;
}

// Free the synthesis window buffer
void PVOCPitchShiftAudioProcessor::deinitSynthesisWindow()
{
    if(synthesisWindowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    synthesisWindowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(synthesisWindowBuffer_);
    synthesisWindowBuffer_ = 0;
}

// Update the actual hop size depending on the window size and hop size settings.
// Hop size is expressed as a fraction of a window in the parameters.
void PVOCPitchShiftAudioProcessor::updateHopSize()
{
    switch(hopSelectedSize_)
    {
        case kHopSize1Window:
            hopActualSize_ = fftActualTransformSize_;
            break;
        case kHopSize1_2Window:
            hopActualSize_ = fftActualTransformSize_ / 2;
            break;
        case kHopSize1_4Window:
            hopActualSize_ = fftActualTransformSize_ / 4;
            break;
        case kHopSize1_8Window:
            hopActualSize_ = fftActualTransformSize_ / 8;
            break;
    }

    // Update the factor by which samples are scaled to preserve unity gain
    updateScaleFactor();

    // Read pointer lags the write pointer to allow the FFT buffers to accumulate and
    // be processed. Total latency is the sum of the FFT size and the hop size.
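    // e.g. a 1024-point FFT with a 1/8-window hop (128 samples) places the write pointer
    // 1152 samples ahead of the read pointer: roughly 26 ms of latency at 44.1 kHz.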
    outputBufferWritePosition_ = hopActualSize_ + fftActualTransformSize_;
}


// (⊙_⊙) Update the pitch shift
void PVOCPitchShiftAudioProcessor::updatePitchShift()
{
    switch(pitchSelectedShift_)
    {
        case kShift0:
            pitchActualShift_ = 1.0;
            break;
        case kShiftP1:
            pitchActualShift_ = pow(2.0, 1.0/12.0);
            break;
        case kShiftP2:
            pitchActualShift_ = pow(2.0, 2.0/12.0);
            break;
        case kShiftP3:
            pitchActualShift_ = pow(2.0, 3.0/12.0);
            break;
        case kShiftP4:
            pitchActualShift_ = pow(2.0, 4.0/12.0);
            break;
        case kShiftP5:
            pitchActualShift_ = pow(2.0, 5.0/12.0);
            break;
        case kShiftP6:
            pitchActualShift_ = pow(2.0, 6.0/12.0);
            break;
        case kShiftM1:
            pitchActualShift_ = pow(2.0, -1.0/12.0);
            break;
        case kShiftM2:
            pitchActualShift_ = pow(2.0, -2.0/12.0);
            break;
        case kShiftM3:
            pitchActualShift_ = pow(2.0, -3.0/12.0);
            break;
        case kShiftM4:
            pitchActualShift_ = pow(2.0, -4.0/12.0);
            break;
        case kShiftM5:
            pitchActualShift_ = pow(2.0, -5.0/12.0);
            break;
        case kShiftM6:
            pitchActualShift_ = pow(2.0, -6.0/12.0);
            break;
    }
    // Quantise the ratio so that pitchActualShift_*hopActualSize_ is a whole number of samples
    actualRatio_ = round(pitchActualShift_*hopActualSize_)/hopActualSize_;
    pitchActualShiftRec_ = 1/pitchActualShift_;
}

// (⊙_⊙) Principal phase argument: wrap the phase into (-pi, pi],
// equivalent to MATLAB's mod(phasein + pi, -2*pi) + pi
double PVOCPitchShiftAudioProcessor::princArg(double phaseIn)
{
    // Branch on the sign of (phaseIn + pi), since C's fmod takes the sign of its first argument
    if (phaseIn + M_PI >= 0)
        return fmod(phaseIn + M_PI, 2*M_PI) - M_PI;
    else
        return fmod(phaseIn + M_PI, -2*M_PI) + M_PI;
}

// Update the factor by which each output sample is scaled. This needs updating
// every time the FFT size, hop size, or window type changes.
void PVOCPitchShiftAudioProcessor::updateScaleFactor()
{
    // The gain needs to be normalised by the sum of the window, which implicitly
    // accounts for the length of the transform and the window type. From there
    // we also update based on hop size: smaller hop means more overlap means the
    // overall gain should be reduced.
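    // As a concrete case: a periodic Hann window of length N sums to exactly N/2, so with a
    // 1/8-window hop the scale factor below becomes 0.125/(N/2) = 0.25/N.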
    double windowSum = 0.0;

    for(int i = 0; i < windowBufferLength_; i++)
    {
        windowSum += windowBuffer_[i];
    }

    if(windowSum == 0.0)
        fftScaleFactor_ = 0.0; // Catch invalid cases and mute output
    else
    {
        switch(hopSelectedSize_)
        {
            case kHopSize1Window:    // 0dB
                fftScaleFactor_ = 1.0/(double)windowSum;
                break;
            case kHopSize1_2Window:  // -6dB
                fftScaleFactor_ = 0.5/(double)windowSum;
                break;
            case kHopSize1_4Window:  // -12dB
                fftScaleFactor_ = 0.25/(double)windowSum;
                break;
            case kHopSize1_8Window:  // -18dB
                fftScaleFactor_ = 0.125/(double)windowSum;
                break;
        }
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PVOCPitchShiftAudioProcessor();
}