annotate effects/robotisation/Source/PluginProcessor.cpp @ 1:04e171d2a747 tip

JUCE 4 compatible. Standardised paths on Mac: modules '../../juce/modules'; VST folder '~/SDKs/vstsdk2.4' (JUCE default). Replaced deprecated 'getSampleData(channel)'; getToggleState(...); setToggleState(...); setSelectedId(...). Removed unused variables. Ignore JUCE code and build files.
author Brecht De Man <b.deman@qmul.ac.uk>
date Sun, 22 Nov 2015 15:23:40 +0000
parents e32fe563e124
children
rev   line source
andrewm@0 1 /*
andrewm@0 2 This code accompanies the textbook:
andrewm@0 3
andrewm@0 4 Digital Audio Effects: Theory, Implementation and Application
andrewm@0 5 Joshua D. Reiss and Andrew P. McPherson
andrewm@0 6
andrewm@0 7 ---
andrewm@0 8
andrewm@0 9 Robotisation: robot effect using phase vocoder;
andrewm@0 10 see also whisperisation code in processBlock() below.
andrewm@0 11
andrewm@0 12 See textbook Chapter 8: The Phase Vocoder
andrewm@0 13
andrewm@0 14 Code by Andrew McPherson, Brecht De Man and Joshua Reiss
andrewm@0 15
andrewm@0 16 This code requires the fftw library version 3 to compile:
andrewm@0 17 http://fftw.org
andrewm@0 18
andrewm@0 19 ---
andrewm@0 20
andrewm@0 21 This program is free software: you can redistribute it and/or modify
andrewm@0 22 it under the terms of the GNU General Public License as published by
andrewm@0 23 the Free Software Foundation, either version 3 of the License, or
andrewm@0 24 (at your option) any later version.
andrewm@0 25
andrewm@0 26 This program is distributed in the hope that it will be useful,
andrewm@0 27 but WITHOUT ANY WARRANTY; without even the implied warranty of
andrewm@0 28 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
andrewm@0 29 GNU General Public License for more details.
andrewm@0 30
andrewm@0 31 You should have received a copy of the GNU General Public License
andrewm@0 32 along with this program. If not, see <http://www.gnu.org/licenses/>.
andrewm@0 33 */
andrewm@0 34
andrewm@0 35
#include "PluginProcessor.h"
#include "PluginEditor.h"

#include <cmath>
andrewm@0 38
andrewm@0 39
andrewm@0 40 //==============================================================================
andrewm@0 41 RobotisationAudioProcessor::RobotisationAudioProcessor() : inputBuffer_(2, 1), outputBuffer_(2, 1)
andrewm@0 42 {
andrewm@0 43 // Set default values:
andrewm@0 44 fftSelectedSize_ = 512;
andrewm@0 45 hopSelectedSize_ = hopActualSize_ = 256;
andrewm@0 46 windowType_ = kWindowRectangular;
andrewm@0 47
andrewm@0 48 fftInitialised_ = false;
andrewm@0 49 fftActualTransformSize_ = 0;
andrewm@0 50 inputBufferLength_ = 1;
andrewm@0 51 outputBufferLength_ = 1;
andrewm@0 52 inputBufferWritePosition_ = outputBufferWritePosition_ = outputBufferReadPosition_ = 0;
andrewm@0 53 samplesSinceLastFFT_ = 0;
andrewm@0 54 windowBuffer_ = 0;
andrewm@0 55 windowBufferLength_ = 0;
andrewm@0 56 preparedToPlay_ = false;
andrewm@0 57 fftScaleFactor_ = 0.0;
andrewm@0 58
andrewm@0 59 lastUIWidth_ = 370;
andrewm@0 60 lastUIHeight_ = 120;
andrewm@0 61 }
andrewm@0 62
andrewm@0 63 RobotisationAudioProcessor::~RobotisationAudioProcessor()
andrewm@0 64 {
andrewm@0 65 // Release FFT resources if allocated. This should be handled by
andrewm@0 66 // releaseResources() but in the event it doesn't happen, this avoids
andrewm@0 67 // a leak. Harmless to call it twice.
andrewm@0 68 deinitFFT();
andrewm@0 69 deinitWindow();
andrewm@0 70 }
andrewm@0 71
andrewm@0 72 //==============================================================================
// Returns the plugin name configured at build time by the JUCE project.
const String RobotisationAudioProcessor::getName() const
{
    return JucePlugin_Name;
}
andrewm@0 77
// Fixed parameter set: FFT size, hop size and window type.
int RobotisationAudioProcessor::getNumParameters()
{
    return kNumParameters;
}
andrewm@0 82
andrewm@0 83 float RobotisationAudioProcessor::getParameter (int index)
andrewm@0 84 {
andrewm@0 85 // This method will be called by the host, probably on the audio thread, so
andrewm@0 86 // it's absolutely time-critical. Don't use critical sections or anything
andrewm@0 87 // UI-related, or anything at all that may block in any way!
andrewm@0 88 switch (index)
andrewm@0 89 {
andrewm@0 90 case kFFTSizeParam: return (float)fftSelectedSize_;
andrewm@0 91 case kHopSizeParam: return (float)hopSelectedSize_;
andrewm@0 92 case kWindowTypeParam: return (float)windowType_;
andrewm@0 93 default: return 0.0f;
andrewm@0 94 }
andrewm@0 95 }
andrewm@0 96
andrewm@0 97 void RobotisationAudioProcessor::setParameter (int index, float newValue)
andrewm@0 98 {
andrewm@0 99 // This method will be called by the host, probably on the audio thread, so
andrewm@0 100 // it's absolutely time-critical. Don't use critical sections or anything
andrewm@0 101 // UI-related, or anything at all that may block in any way!
andrewm@0 102 switch (index)
andrewm@0 103 {
andrewm@0 104 case kFFTSizeParam:
andrewm@0 105 if((int)newValue != fftSelectedSize_)
andrewm@0 106 {
andrewm@0 107 fftSelectedSize_ = (int)newValue;
andrewm@0 108 if(preparedToPlay_)
andrewm@0 109 {
andrewm@0 110 // Update settings if currently playing, else wait until prepareToPlay() called
andrewm@0 111 initFFT(fftSelectedSize_);
andrewm@0 112 initWindow(fftSelectedSize_, windowType_);
andrewm@0 113 }
andrewm@0 114 }
andrewm@0 115 break;
andrewm@0 116 case kHopSizeParam:
andrewm@0 117 hopSelectedSize_ = (int)newValue;
andrewm@0 118 if(preparedToPlay_)
andrewm@0 119 updateHopSize();
andrewm@0 120 break;
andrewm@0 121 case kWindowTypeParam:
andrewm@0 122 // Recalculate window if needed
andrewm@0 123 if((int)newValue != windowType_)
andrewm@0 124 {
andrewm@0 125 windowType_ = (int)newValue;
andrewm@0 126 if(preparedToPlay_)
andrewm@0 127 initWindow(fftActualTransformSize_, (int)newValue);
andrewm@0 128 }
andrewm@0 129 break;
andrewm@0 130 default:
andrewm@0 131 break;
andrewm@0 132 }
andrewm@0 133 }
andrewm@0 134
andrewm@0 135 const String RobotisationAudioProcessor::getParameterName (int index)
andrewm@0 136 {
andrewm@0 137 switch (index)
andrewm@0 138 {
andrewm@0 139 case kFFTSizeParam: return "FFT size";
andrewm@0 140 case kHopSizeParam: return "hop size";
andrewm@0 141 case kWindowTypeParam: return "window type";
andrewm@0 142 default: break;
andrewm@0 143 }
andrewm@0 144
andrewm@0 145 return String::empty;
andrewm@0 146 }
andrewm@0 147
andrewm@0 148 const String RobotisationAudioProcessor::getParameterText (int index)
andrewm@0 149 {
andrewm@0 150 return String (getParameter (index), 2);
andrewm@0 151 }
andrewm@0 152
andrewm@0 153 const String RobotisationAudioProcessor::getInputChannelName (int channelIndex) const
andrewm@0 154 {
andrewm@0 155 return String (channelIndex + 1);
andrewm@0 156 }
andrewm@0 157
andrewm@0 158 const String RobotisationAudioProcessor::getOutputChannelName (int channelIndex) const
andrewm@0 159 {
andrewm@0 160 return String (channelIndex + 1);
andrewm@0 161 }
andrewm@0 162
// Treat all input channels as stereo pairs, regardless of index.
bool RobotisationAudioProcessor::isInputChannelStereoPair (int index) const
{
    return true;
}
andrewm@0 167
// Treat all output channels as stereo pairs, regardless of index.
bool RobotisationAudioProcessor::isOutputChannelStereoPair (int index) const
{
    return true;
}
andrewm@0 172
// Reports whether silent input is guaranteed to produce silent output,
// as configured at build time by JucePlugin_SilenceInProducesSilenceOut.
bool RobotisationAudioProcessor::silenceInProducesSilenceOut() const
{
#if JucePlugin_SilenceInProducesSilenceOut
    return true;
#else
    return false;
#endif
}
andrewm@0 181
// No reverb-style tail is reported, even though the overlap-add buffer
// does introduce latency (see updateHopSize()).
double RobotisationAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}
andrewm@0 186
// MIDI input capability, fixed at build time by JucePlugin_WantsMidiInput.
bool RobotisationAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}
andrewm@0 195
// MIDI output capability, fixed at build time by JucePlugin_ProducesMidiOutput.
bool RobotisationAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}
andrewm@0 204
andrewm@0 205 int RobotisationAudioProcessor::getNumPrograms()
andrewm@0 206 {
andrewm@0 207 return 0;
andrewm@0 208 }
andrewm@0 209
// Programs are not implemented; the current program is always 0.
int RobotisationAudioProcessor::getCurrentProgram()
{
    return 0;
}
andrewm@0 214
// Programs are not implemented; program changes are ignored.
void RobotisationAudioProcessor::setCurrentProgram (int index)
{
}
andrewm@0 218
// Programs are not implemented; all program names are empty.
const String RobotisationAudioProcessor::getProgramName (int index)
{
    return String::empty;
}
andrewm@0 223
// Programs are not implemented; renaming is ignored.
void RobotisationAudioProcessor::changeProgramName (int index, const String& newName)
{
}
andrewm@0 227
andrewm@0 228 //==============================================================================
//==============================================================================
// Allocates the FFT plans/buffers and analysis window for the selected
// size before playback starts. Once preparedToPlay_ is set, parameter
// changes reconfigure immediately instead of waiting for the next call.
void RobotisationAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..

    initFFT(fftSelectedSize_);
    initWindow(fftSelectedSize_, windowType_);
    preparedToPlay_ = true;
}
andrewm@0 238
// Frees the FFT plans/buffers and window when playback stops;
// processBlock() outputs silence until prepareToPlay() runs again.
void RobotisationAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.

    deinitFFT();
    deinitWindow();
    preparedToPlay_ = false;
}
andrewm@0 248
andrewm@0 249 void RobotisationAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
andrewm@0 250 {
andrewm@0 251 // Helpful information about this block of samples:
andrewm@0 252 const int numInputChannels = getNumInputChannels(); // How many input channels for our effect?
andrewm@0 253 const int numOutputChannels = getNumOutputChannels(); // How many output channels for our effect?
andrewm@0 254 const int numSamples = buffer.getNumSamples(); // How many samples in the buffer for this block?
andrewm@0 255
andrewm@0 256 int channel, inwritepos, sampsincefft;
andrewm@0 257 int outreadpos, outwritepos;
andrewm@0 258
andrewm@0 259 // Grab the lock that prevents the FFT settings from changing
andrewm@0 260 fftSpinLock_.enter();
andrewm@0 261
andrewm@0 262 // Check that we're initialised and ready to go. If not, set output to 0
andrewm@0 263 if(!fftInitialised_)
andrewm@0 264 {
andrewm@0 265 for (channel = 0; channel < numOutputChannels; ++channel)
andrewm@0 266 {
andrewm@0 267 buffer.clear (channel, 0, buffer.getNumSamples());
andrewm@0 268 }
andrewm@0 269
andrewm@0 270 fftSpinLock_.exit();
andrewm@0 271 return;
andrewm@0 272 }
andrewm@0 273
andrewm@0 274 // Go through each channel of audio that's passed in. Collect the samples in the input
andrewm@0 275 // buffer. When we've reached the next hop interval, calculate the FFT.
andrewm@0 276 for (channel = 0; channel < numInputChannels; ++channel)
andrewm@0 277 {
andrewm@0 278 // channelData is an array of length numSamples which contains the audio for one channel
andrewm@0 279 float* channelData = buffer.getSampleData(channel);
andrewm@0 280
andrewm@0 281 // inputBufferData is the circular buffer for collecting input samples for the FFT
andrewm@0 282 float* inputBufferData = inputBuffer_.getSampleData(jmin (channel, inputBuffer_.getNumChannels() - 1));
andrewm@0 283 float* outputBufferData = outputBuffer_.getSampleData(jmin (channel, inputBuffer_.getNumChannels() - 1));
andrewm@0 284
andrewm@0 285 // State variables need to be temporarily cached for each channel. We don't want the
andrewm@0 286 // operations on one channel to affect the identical behaviour of the next channel
andrewm@0 287 inwritepos = inputBufferWritePosition_;
andrewm@0 288 outwritepos = outputBufferWritePosition_;
andrewm@0 289 outreadpos = outputBufferReadPosition_;
andrewm@0 290 sampsincefft = samplesSinceLastFFT_;
andrewm@0 291
andrewm@0 292 for (int i = 0; i < numSamples; ++i)
andrewm@0 293 {
andrewm@0 294 const float in = channelData[i];
andrewm@0 295
andrewm@0 296 // Store the next buffered sample in the output. Do this first before anything
andrewm@0 297 // changes the output buffer-- we will have at least one FFT size worth of data
andrewm@0 298 // stored and ready to go. Set the result to 0 when finished in preparation for the
andrewm@0 299 // next overlap/add procedure.
andrewm@0 300 channelData[i] = outputBufferData[outreadpos];
andrewm@0 301 outputBufferData[outreadpos] = 0.0;
andrewm@0 302 if(++outreadpos >= outputBufferLength_)
andrewm@0 303 outreadpos = 0;
andrewm@0 304
andrewm@0 305 // Store the current sample in the input buffer, incrementing the write pointer. Also
andrewm@0 306 // increment how many samples we've stored since the last transform. If it reaches the
andrewm@0 307 // hop size, perform an FFT and any frequency-domain processing.
andrewm@0 308 inputBufferData[inwritepos] = in;
andrewm@0 309 if (++inwritepos >= inputBufferLength_)
andrewm@0 310 inwritepos = 0;
andrewm@0 311 if (++sampsincefft >= hopActualSize_)
andrewm@0 312 {
andrewm@0 313 sampsincefft = 0;
andrewm@0 314
andrewm@0 315 // Find the index of the starting sample in the buffer. When the buffer length
andrewm@0 316 // is equal to the transform size, this will be the current write position but
andrewm@0 317 // this code is more general for larger buffers.
andrewm@0 318 int inputBufferStartPosition = (inwritepos + inputBufferLength_
andrewm@0 319 - fftActualTransformSize_) % inputBufferLength_;
andrewm@0 320
andrewm@0 321 // Window the buffer and copy it into the FFT input
andrewm@0 322 int inputBufferIndex = inputBufferStartPosition;
andrewm@0 323 for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
andrewm@0 324 {
andrewm@0 325 // Set real part to windowed signal; imaginary part to 0.
andrewm@0 326 fftTimeDomain_[fftBufferIndex][1] = 0.0;
andrewm@0 327 if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
andrewm@0 328 fftTimeDomain_[fftBufferIndex][0] = 0.0;
andrewm@0 329 else
andrewm@0 330 fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
andrewm@0 331 * inputBufferData[inputBufferIndex];
andrewm@0 332 inputBufferIndex++;
andrewm@0 333 if(inputBufferIndex >= inputBufferLength_)
andrewm@0 334 inputBufferIndex = 0;
andrewm@0 335 }
andrewm@0 336
andrewm@0 337 // Perform the FFT on the windowed data, going into the frequency domain.
andrewm@0 338 // Result will be in fftFrequencyDomain_
andrewm@0 339 fftw_execute(fftForwardPlan_);
andrewm@0 340
andrewm@0 341 // ********** PHASE VOCODER PROCESSING GOES HERE **************
andrewm@0 342 // This is the place where frequency-domain calculations are made
andrewm@0 343 // on the transformed signal. Put the result back into fftFrequencyDomain_
andrewm@0 344 // before transforming back.
andrewm@0 345 // ************************************************************
andrewm@0 346
andrewm@0 347 // Whisperiser
andrewm@0 348 /*for(int bin = 0; bin <= fftActualTransformSize_ / 2; bin++)
andrewm@0 349 {
andrewm@0 350 float amplitude = sqrt(fftFrequencyDomain_[bin][0]*fftFrequencyDomain_[bin][0] +
andrewm@0 351 fftFrequencyDomain_[bin][1]*fftFrequencyDomain_[bin][1]);
andrewm@0 352
andrewm@0 353 // This is what we would use to exactly reconstruct the signal:
andrewm@0 354 // float phase = atan2(fftFrequencyDomain_[bin][1], fftFrequencyDomain_[bin][0]);
andrewm@0 355
andrewm@0 356 // But instead, this is what we use to scramble the phase:
andrewm@0 357 float phase = 2.0 * M_PI * (float)rand() / (float)RAND_MAX;
andrewm@0 358
andrewm@0 359 // Set the phase of each bin to 0. phase = 0 means the signal is entirely
andrewm@0 360 // positive-real, but the overall amplitude is the same as before.
andrewm@0 361 fftFrequencyDomain_[bin][0] = amplitude * cos(phase);
andrewm@0 362 fftFrequencyDomain_[bin][1] = amplitude * sin(phase);
andrewm@0 363
andrewm@0 364 // FFTs of real signals are conjugate-symmetric. We need to maintain that symmetry
andrewm@0 365 // to produce a real output, even as we randomize the phase.
andrewm@0 366 if(bin > 0 && bin < fftActualTransformSize_ / 2) {
andrewm@0 367 fftFrequencyDomain_[fftActualTransformSize_ - bin][0] = amplitude * cos(phase);
andrewm@0 368 fftFrequencyDomain_[fftActualTransformSize_ - bin][1] = - amplitude * sin(phase);
andrewm@0 369 }
andrewm@0 370 }*/
andrewm@0 371
andrewm@0 372 for(int bin = 0; bin < fftActualTransformSize_; bin++)
andrewm@0 373 {
andrewm@0 374 float amplitude = sqrt(fftFrequencyDomain_[bin][0]*fftFrequencyDomain_[bin][0] +
andrewm@0 375 fftFrequencyDomain_[bin][1]*fftFrequencyDomain_[bin][1]);
andrewm@0 376
andrewm@0 377 // Set the phase of each bin to 0. phase = 0 means the signal is entirely
andrewm@0 378 // positive-real, but the overall amplitude is the same as before.
andrewm@0 379 fftFrequencyDomain_[bin][0] = amplitude;
andrewm@0 380 fftFrequencyDomain_[bin][1] = 0.0;
andrewm@0 381 }
andrewm@0 382
andrewm@0 383 // Perform the inverse FFT to get back to the time domain. Result wll be
andrewm@0 384 // in fftTimeDomain_. If we've done it right (kept the frequency domain
andrewm@0 385 // symmetric), the time domain resuld should be strictly real allowing us
andrewm@0 386 // to ignore the imaginary part.
andrewm@0 387 fftw_execute(fftBackwardPlan_);
andrewm@0 388
andrewm@0 389 // Add the result to the output buffer, starting at the current write position
andrewm@0 390 // (Output buffer will have been zeroed after reading the last time around)
andrewm@0 391 // Output needs to be scaled by the transform size to get back to original amplitude:
andrewm@0 392 // this is a property of how fftw is implemented. Scaling will also need to be adjusted
andrewm@0 393 // based on hop size to get the same output level (smaller hop size produces more overlap
andrewm@0 394 // and hence higher signal level)
andrewm@0 395 int outputBufferIndex = outwritepos;
andrewm@0 396 for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
andrewm@0 397 {
andrewm@0 398 // Reapply the window since phase changes will result in discontinities at the edges
andrewm@0 399 // of the window
andrewm@0 400 if(fftBufferIndex < windowBufferLength_)
andrewm@0 401 outputBufferData[outputBufferIndex] += windowBuffer_[fftBufferIndex] *
andrewm@0 402 fftTimeDomain_[fftBufferIndex][0] * fftScaleFactor_;
andrewm@0 403 if(++outputBufferIndex >= outputBufferLength_)
andrewm@0 404 outputBufferIndex = 0;
andrewm@0 405 }
andrewm@0 406
andrewm@0 407 // Advance the write position within the buffer by the hop size
andrewm@0 408 outwritepos = (outwritepos + hopActualSize_) % outputBufferLength_;
andrewm@0 409 }
andrewm@0 410 }
andrewm@0 411 }
andrewm@0 412
andrewm@0 413 // Having made a local copy of the state variables for each channel, now transfer the result
andrewm@0 414 // back to the main state variable so they will be preserved for the next call of processBlock()
andrewm@0 415 inputBufferWritePosition_ = inwritepos;
andrewm@0 416 outputBufferWritePosition_ = outwritepos;
andrewm@0 417 outputBufferReadPosition_ = outreadpos;
andrewm@0 418 samplesSinceLastFFT_ = sampsincefft;
andrewm@0 419
andrewm@0 420 // In case we have more outputs than inputs, we'll clear any output
andrewm@0 421 // channels that didn't contain input data, (because these aren't
andrewm@0 422 // guaranteed to be empty - they may contain garbage).
andrewm@0 423 for (int i = numInputChannels; i < numOutputChannels; ++i)
andrewm@0 424 {
andrewm@0 425 buffer.clear (i, 0, buffer.getNumSamples());
andrewm@0 426 }
andrewm@0 427
andrewm@0 428 fftSpinLock_.exit();
andrewm@0 429 }
andrewm@0 430
andrewm@0 431 //==============================================================================
//==============================================================================
// This plugin supplies a custom editor (see createEditor()).
bool RobotisationAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}
andrewm@0 436
// Creates the editor component; the host takes ownership of the returned object.
AudioProcessorEditor* RobotisationAudioProcessor::createEditor()
{
    return new RobotisationAudioProcessorEditor (this);
}
andrewm@0 441
andrewm@0 442 //==============================================================================
//==============================================================================
// Serialises the plugin state (editor size plus the three DSP parameters)
// as XML inside the host-provided binary blob.
void RobotisationAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml("C4DMPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute("uiWidth", lastUIWidth_);
    xml.setAttribute("uiHeight", lastUIHeight_);
    xml.setAttribute("fftSize", fftSelectedSize_);
    xml.setAttribute("hopSize", hopSelectedSize_);
    xml.setAttribute("windowType", windowType_);

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary(xml, destData);
}
andrewm@0 462
andrewm@0 463 void RobotisationAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
andrewm@0 464 {
andrewm@0 465 // You should use this method to restore your parameters from this memory block,
andrewm@0 466 // whose contents will have been created by the getStateInformation() call.
andrewm@0 467
andrewm@0 468 // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
andrewm@0 469 ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));
andrewm@0 470
andrewm@0 471 if(xmlState != 0)
andrewm@0 472 {
andrewm@0 473 // make sure that it's actually our type of XML object..
andrewm@0 474 if(xmlState->hasTagName("C4DMPLUGINSETTINGS"))
andrewm@0 475 {
andrewm@0 476 // ok, now pull out our parameters..
andrewm@0 477 lastUIWidth_ = xmlState->getIntAttribute("uiWidth", lastUIWidth_);
andrewm@0 478 lastUIHeight_ = xmlState->getIntAttribute("uiHeight", lastUIHeight_);
andrewm@0 479
andrewm@0 480 fftSelectedSize_ = (int)xmlState->getDoubleAttribute("fftSize", fftSelectedSize_);
andrewm@0 481 hopSelectedSize_ = (int)xmlState->getDoubleAttribute("hopSize", hopSelectedSize_);
andrewm@0 482 windowType_ = (int)xmlState->getDoubleAttribute("windowType", windowType_);
andrewm@0 483
andrewm@0 484 if(preparedToPlay_)
andrewm@0 485 {
andrewm@0 486 // Update settings if currently playing, else wait until prepareToPlay() called
andrewm@0 487 initFFT(fftSelectedSize_);
andrewm@0 488 initWindow(fftSelectedSize_, windowType_);
andrewm@0 489 }
andrewm@0 490 }
andrewm@0 491 }
andrewm@0 492 }
andrewm@0 493
andrewm@0 494 //==============================================================================
andrewm@0 495 // Initialise the FFT data structures for a given length transform
//==============================================================================
// Initialise the FFT data structures for a given length transform.
// Allocates the complex buffers, creates forward/backward fftw plans, and
// sizes the input (= FFT length) and output (= 2x FFT length) circular
// buffers. Sets fftInitialised_ last, so processBlock() only touches these
// structures once they are fully built.
void RobotisationAudioProcessor::initFFT(int length)
{
    // Tear down any previous configuration before re-allocating.
    if(fftInitialised_)
        deinitFFT();

    // Save the current length so we know how big our results are later
    fftActualTransformSize_ = length;

    // Here we allocate the complex-number buffers for the FFT. This uses
    // a convenient wrapper on the more general fftw_malloc()
    fftTimeDomain_ = fftw_alloc_complex(length);
    fftFrequencyDomain_ = fftw_alloc_complex(length);

    // FFTW_ESTIMATE doesn't necessarily produce the fastest executing code (FFTW_MEASURE
    // will get closer) but it carries a minimum startup cost. FFTW_MEASURE might stall for
    // several seconds which would be annoying in an audio plug-in context.
    fftForwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftTimeDomain_,
                                       fftFrequencyDomain_, FFTW_FORWARD, FFTW_ESTIMATE);
    fftBackwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftFrequencyDomain_,
                                        fftTimeDomain_, FFTW_BACKWARD, FFTW_ESTIMATE);

    // Allocate the buffer that the samples will be collected in
    inputBufferLength_ = fftActualTransformSize_;
    inputBuffer_.setSize(2, inputBufferLength_);
    inputBuffer_.clear();
    inputBufferWritePosition_ = 0;
    samplesSinceLastFFT_ = 0;

    // Allocate the output buffer to be twice the size of the FFT
    // This will be enough for all hop size cases
    outputBufferLength_ = 2*fftActualTransformSize_;
    outputBuffer_.setSize(2, outputBufferLength_);
    outputBuffer_.clear();
    outputBufferReadPosition_ = 0;

    // Sets hopActualSize_, the scale factor, and the output write position
    // (which determines the overall latency).
    updateHopSize();

    fftInitialised_ = true;
}
andrewm@0 535
andrewm@0 536 // Free the FFT data structures
andrewm@0 537 void RobotisationAudioProcessor::deinitFFT()
andrewm@0 538 {
andrewm@0 539 if(!fftInitialised_)
andrewm@0 540 return;
andrewm@0 541
andrewm@0 542 // Prevent this variable from changing while an audio callback is running.
andrewm@0 543 // Once it has changed, the next audio callback will find that it's not
andrewm@0 544 // initialised and will return silence instead of attempting to work with the
andrewm@0 545 // (invalid) FFT structures. This produces an audible glitch but no crash,
andrewm@0 546 // and is the simplest way to handle parameter changes in this example code.
andrewm@0 547 fftSpinLock_.enter();
andrewm@0 548 fftInitialised_ = false;
andrewm@0 549 fftSpinLock_.exit();
andrewm@0 550
andrewm@0 551 fftw_destroy_plan(fftForwardPlan_);
andrewm@0 552 fftw_destroy_plan(fftBackwardPlan_);
andrewm@0 553 fftw_free(fftTimeDomain_);
andrewm@0 554 fftw_free(fftFrequencyDomain_);
andrewm@0 555
andrewm@0 556 // Leave the input buffer in memory until the plugin is released
andrewm@0 557 }
andrewm@0 558
andrewm@0 559 //==============================================================================
andrewm@0 560 // Create a new window of a given length and type
andrewm@0 561 void RobotisationAudioProcessor::initWindow(int length, int windowType)
andrewm@0 562 {
andrewm@0 563 if(windowBuffer_ != 0)
andrewm@0 564 deinitWindow();
andrewm@0 565 if(length == 0) // Sanity check
andrewm@0 566 return;
andrewm@0 567
andrewm@0 568 // Allocate memory for the window
andrewm@0 569 windowBuffer_ = (double *)malloc(length * sizeof(double));
andrewm@0 570
andrewm@0 571 // Write the length as a double here to simplify the code below (otherwise
andrewm@0 572 // typecasts would be wise)
andrewm@0 573 double windowLength = length;
andrewm@0 574
andrewm@0 575 // Set values for the window, depending on its type
andrewm@0 576 for(int i = 0; i < length; i++)
andrewm@0 577 {
andrewm@0 578 // Window functions are typically defined to be symmetrical. This will cause a
andrewm@0 579 // problem in the overlap-add process: the windows instead need to be periodic
andrewm@0 580 // when arranged end-to-end. As a result we calculate the window of one sample
andrewm@0 581 // larger than usual, and drop the last sample. (This works as long as N is even.)
andrewm@0 582 // See Julius Smith, "Spectral Audio Signal Processing" for details.
andrewm@0 583 switch(windowType)
andrewm@0 584 {
andrewm@0 585 case kWindowBartlett:
andrewm@0 586 windowBuffer_[i] = (2.0/(windowLength + 2.0))*
andrewm@0 587 (0.5*(windowLength + 2.0) - abs((double)i - 0.5*windowLength));
andrewm@0 588 break;
andrewm@0 589 case kWindowHann:
andrewm@0 590 windowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
andrewm@0 591 break;
andrewm@0 592 case kWindowHamming:
andrewm@0 593 windowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
andrewm@0 594 break;
andrewm@0 595 case kWindowRectangular:
andrewm@0 596 default:
andrewm@0 597 windowBuffer_[i] = 1.0;
andrewm@0 598 break;
andrewm@0 599 }
andrewm@0 600 }
andrewm@0 601
andrewm@0 602 windowBufferLength_ = length;
andrewm@0 603 updateScaleFactor();
andrewm@0 604 }
andrewm@0 605
andrewm@0 606 // Free the window buffer
// Free the window buffer. Safe to call when no window is allocated.
void RobotisationAudioProcessor::deinitWindow()
{
    if(windowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    // (processBlock() checks windowBufferLength_ before reading the buffer).
    fftSpinLock_.enter();
    windowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(windowBuffer_);
    windowBuffer_ = 0;
}
andrewm@0 621
andrewm@0 622 // Update the actual hop size depending on the window size and hop size settings
andrewm@0 623 // Hop size is expressed as a fraction of a window in the parameters.
// Update the actual hop size depending on the window size and hop size settings
// Hop size is expressed as a fraction of a window in the parameters.
void RobotisationAudioProcessor::updateHopSize()
{
    // NOTE(review): despite the comment above, the hop value is used here as
    // an absolute sample count (default 256), not a window fraction — confirm.
    hopActualSize_ = hopSelectedSize_;

    // Update the factor by which samples are scaled to preserve unity gain
    updateScaleFactor();

    // Read pointer lags the write pointer to allow for FFT buffers to accumulate and
    // be processed. Total latency is sum of the FFT size and the hop size.
    outputBufferWritePosition_ = hopActualSize_ + fftActualTransformSize_;
}
andrewm@0 635
andrewm@0 636 // Update the factor by which each output sample is scaled. This needs to update
andrewm@0 637 // every time FFT size, hop size, and window type are changed.
andrewm@0 638 void RobotisationAudioProcessor::updateScaleFactor()
andrewm@0 639 {
andrewm@0 640 // The gain needs to be normalised by the sum of the window, which implicitly
andrewm@0 641 // accounts for the length of the transform and the window type. From there
andrewm@0 642 // we also update based on hop size: smaller hop means more overlap means the
andrewm@0 643 // overall gain should be reduced.
andrewm@0 644 double windowSum = 0.0;
andrewm@0 645
andrewm@0 646 for(int i = 0; i < windowBufferLength_; i++)
andrewm@0 647 {
andrewm@0 648 windowSum += windowBuffer_[i];
andrewm@0 649 }
andrewm@0 650
andrewm@0 651 if(windowSum == 0.0)
andrewm@0 652 fftScaleFactor_ = 0.0; // Catch invalid cases and mute output
andrewm@0 653 else
andrewm@0 654 {
andrewm@0 655 fftScaleFactor_ = ((float)hopSelectedSize_/(float)fftActualTransformSize_)/(double)windowSum;
andrewm@0 656 }
andrewm@0 657 }
andrewm@0 658
andrewm@0 659 //==============================================================================
andrewm@0 660 // This creates new instances of the plugin..
//==============================================================================
// This creates new instances of the plugin; called by the JUCE plugin
// wrapper, which takes ownership of the returned processor.
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new RobotisationAudioProcessor();
}