/*
  This code accompanies the textbook:

  Digital Audio Effects: Theory, Implementation and Application
  Joshua D. Reiss and Andrew P. McPherson

  ---

  PVOC Pitch Shift: pitch shifter using phase vocoder
  See textbook Chapter 8: The Phase Vocoder

  Code by Andrew McPherson, Brecht De Man and Joshua Reiss
  Based on a project by Xinyuan Lai

  This code requires the fftw library version 3 to compile:
  http://fftw.org

  ---

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"

//==============================================================================
PVOCPitchShiftAudioProcessor::PVOCPitchShiftAudioProcessor() : inputBuffer_(2, 1), outputBuffer_(2, 1)
{
    // Set default values:
    fftSelectedSize_ = 1024;
    hopSelectedSize_ = kHopSize1_8Window;
    windowType_ = kWindowHann;

    // (⊙_⊙)
    pitchSelectedShift_ = kShift0;
    pitchActualShift_ = 1.0;
    pitchActualShiftRec_ = 1.0;
    actualRatio_ = 1.0;
    synthesisWindowBufferLength_ = 1024;
    for (int i = 0; i < 2048; i++)
    {
        omega_[i] = 0.25*M_PI*i; // 0.25 corresponding to 1/8 window (2*hopsize/windowlength)
    }
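    // omega_[i] is the phase a sinusoid centred on bin i is expected to advance over
    // one hop (2*pi*i*hopSize/fftSize); the 0.25*M_PI factor above corresponds to the
    // default 1/8-window hop. It is recalculated in setParameter() whenever the FFT
    // size, hop size or pitch shift changes. The loop runs to 2048, which is presumably
    // the maximum FFT size the omega_/phi0_/dphi_/psi_ arrays are declared for in the
    // plugin header.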

    fftInitialised_ = false;
    fftActualTransformSize_ = 0;
    inputBufferLength_ = 1;
    outputBufferLength_ = 1;
    inputBufferWritePosition_ = outputBufferWritePosition_ = outputBufferReadPosition_ = 0;
    samplesSinceLastFFT_ = 0;
    windowBuffer_ = 0;
    synthesisWindowBuffer_ = 0;
    windowBufferLength_ = 0;
    synthesisWindowBufferLength_ = 0;
    preparedToPlay_ = false;
    fftScaleFactor_ = 0.0;

    lastUIWidth_ = 370;
    lastUIHeight_ = 120;
}

PVOCPitchShiftAudioProcessor::~PVOCPitchShiftAudioProcessor()
{
    // Release FFT resources if allocated. This should be handled by
    // releaseResources() but in the event it doesn't happen, this avoids
    // a leak. Harmless to call it twice.
    deinitFFT();
    deinitWindow();
    deinitSynthesisWindow();
}

//==============================================================================
const String PVOCPitchShiftAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

int PVOCPitchShiftAudioProcessor::getNumParameters()
{
    return kNumParameters;
}

float PVOCPitchShiftAudioProcessor::getParameter (int index)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:     return (float)fftSelectedSize_;
        case kHopSizeParam:     return (float)hopSelectedSize_;
        case kWindowTypeParam:  return (float)windowType_;
        case kPitchShiftParam:  return (float)pitchSelectedShift_; // (⊙_⊙)
        default:                return 0.0f;
    }
}

void PVOCPitchShiftAudioProcessor::setParameter (int index, float newValue)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:
            if((int)newValue != fftSelectedSize_)
            {
                fftSelectedSize_ = (int)newValue;
                // (⊙_⊙)
                synthesisWindowBufferLength_ = floor(fftSelectedSize_*pitchActualShiftRec_);

                if(preparedToPlay_)
                {
                    // Update settings if currently playing, else wait until prepareToPlay() called
                    initFFT(fftSelectedSize_);
                    initWindow(fftSelectedSize_, windowType_);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        case kHopSizeParam:
            hopSelectedSize_ = (int)newValue;
            if(preparedToPlay_)
            {
                updateHopSize();
                initWindow(fftSelectedSize_, windowType_);
                initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
            }
            break;
        case kWindowTypeParam:
            // Recalculate window if needed
            if((int)newValue != windowType_)
            {
                windowType_ = (int)newValue;
                if(preparedToPlay_)
                {
                    initWindow(fftActualTransformSize_, (int)newValue);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        case kPitchShiftParam:
            // (⊙_⊙)
            if((int)newValue != pitchSelectedShift_)
            {
                pitchSelectedShift_ = (int)newValue;
                if(preparedToPlay_)
                {
                    updatePitchShift();
                    initWindow(fftSelectedSize_, windowType_);
                    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
                }
            }
            break;
        default:
            break;
    }

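    // omega_[i] = 2*pi*i*hopActualSize_/fftActualTransformSize_ is the nominal phase
    // advance of bin i between successive analysis frames. In processBlock() the
    // measured deviation from omega_[i] (wrapped to +/-pi) gives each bin's
    // instantaneous-frequency offset, so these arrays are reset whenever the FFT size,
    // hop size or pitch shift changes.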
    // (⊙_⊙) reset the arrays containing the phase information
    for (int i = 0; i < 2048; i++)
    {
        omega_[i] = 2*M_PI*i*hopActualSize_/fftActualTransformSize_;
        for (int j = 0; j < 2; j++)
        {
            phi0_[i][j] = 0;
            dphi_[i][j] = 0;
            psi_[i][j] = 0;
        }
    }
}

const String PVOCPitchShiftAudioProcessor::getParameterName (int index)
{
    switch (index)
    {
        case kFFTSizeParam:     return "FFT size";
        case kHopSizeParam:     return "hop size";
        case kWindowTypeParam:  return "window type";
        case kPitchShiftParam:  return "pitch shift"; // (⊙_⊙)
        default:                break;
    }

    return String::empty;
}

const String PVOCPitchShiftAudioProcessor::getParameterText (int index)
{
    return String (getParameter (index), 2);
}

const String PVOCPitchShiftAudioProcessor::getInputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

const String PVOCPitchShiftAudioProcessor::getOutputChannelName (int channelIndex) const
{
    return String (channelIndex + 1);
}

bool PVOCPitchShiftAudioProcessor::isInputChannelStereoPair (int index) const
{
    return true;
}

bool PVOCPitchShiftAudioProcessor::isOutputChannelStereoPair (int index) const
{
    return true;
}

bool PVOCPitchShiftAudioProcessor::silenceInProducesSilenceOut() const
{
#if JucePlugin_SilenceInProducesSilenceOut
    return true;
#else
    return false;
#endif
}

double PVOCPitchShiftAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

bool PVOCPitchShiftAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PVOCPitchShiftAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

int PVOCPitchShiftAudioProcessor::getNumPrograms()
{
    return 0;
}

int PVOCPitchShiftAudioProcessor::getCurrentProgram()
{
    return 0;
}

void PVOCPitchShiftAudioProcessor::setCurrentProgram (int index)
{
}

const String PVOCPitchShiftAudioProcessor::getProgramName (int index)
{
    return String::empty;
}

void PVOCPitchShiftAudioProcessor::changeProgramName (int index, const String& newName)
{
}

//==============================================================================
void PVOCPitchShiftAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
    initFFT(fftSelectedSize_);
    initWindow(fftSelectedSize_, windowType_);
    initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
    preparedToPlay_ = true;
}

void PVOCPitchShiftAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.
    deinitFFT();
    deinitWindow();
    deinitSynthesisWindow();
    preparedToPlay_ = false;
}

void PVOCPitchShiftAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Helpful information about this block of samples:
    const int numInputChannels = getNumInputChannels();     // How many input channels for our effect?
    const int numOutputChannels = getNumOutputChannels();   // How many output channels for our effect?
    const int numSamples = buffer.getNumSamples();          // How many samples in the buffer for this block?

    int channel, inwritepos, sampsincefft;
    int outreadpos, outwritepos;

    // Grab the lock that prevents the FFT settings from changing
    fftSpinLock_.enter();

    // Check that we're initialised and ready to go. If not, set output to 0
    if(!fftInitialised_)
    {
        for (channel = 0; channel < numOutputChannels; ++channel)
        {
            buffer.clear (channel, 0, buffer.getNumSamples());
        }

        fftSpinLock_.exit();
        return;
    }

    // Go through each channel of audio that's passed in. Collect the samples in the input
    // buffer. When we've reached the next hop interval, calculate the FFT.
    for (channel = 0; channel < numInputChannels; ++channel)
    {
        // (⊙_⊙)
        //double amplitude[fftActualTransformSize_];
        // double phi[fftActualTransformSize_];
        // double phi0[fftActualTransformSize_];
        // double dphi[fftActualTransformSize_];
        // double psi[fftActualTransformSize_];
        // double omega[fftActualTransformSize_];
        // for (int i = 0; i<fftActualTransformSize_; i++)
        // {
        //     omega[i] = 2*M_PI*hopActualSize_*i/fftActualTransformSize_;
        // }

        // (⊙_⊙) variables prepared for resampling
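        // Note: grain2 and grain3 below are variable-length arrays whose sizes come
        // from runtime values. VLAs are a C99 feature that GCC and Clang accept as an
        // extension in C++; they are allocated on the stack each time this channel
        // loop runs.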
        double grain2[fftActualTransformSize_ + 1];
        double grain3[(int)floor(pitchActualShiftRec_*fftActualTransformSize_)];
        double lx;
        double x;
        int ix;
        double dx;

        // channelData is an array of length numSamples which contains the audio for one channel
        float* channelData = buffer.getWritePointer(channel);

        // inputBufferData is the circular buffer for collecting input samples for the FFT
        float* inputBufferData = inputBuffer_.getWritePointer(jmin (channel, inputBuffer_.getNumChannels() - 1));
        float* outputBufferData = outputBuffer_.getWritePointer(jmin (channel, inputBuffer_.getNumChannels() - 1));

        // State variables need to be temporarily cached for each channel. We don't want the
        // operations on one channel to affect the identical behaviour of the next channel
        inwritepos = inputBufferWritePosition_;
        outwritepos = outputBufferWritePosition_;
        outreadpos = outputBufferReadPosition_;
        sampsincefft = samplesSinceLastFFT_;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];

            // Store the next buffered sample in the output. Do this first before anything
            // changes the output buffer-- we will have at least one FFT size worth of data
            // stored and ready to go. Set the result to 0 when finished in preparation for the
            // next overlap/add procedure.
            channelData[i] = outputBufferData[outreadpos];
            outputBufferData[outreadpos] = 0.0;
            if(++outreadpos >= outputBufferLength_)
                outreadpos = 0;

            // Store the current sample in the input buffer, incrementing the write pointer. Also
            // increment how many samples we've stored since the last transform. If it reaches the
            // hop size, perform an FFT and any frequency-domain processing.
            inputBufferData[inwritepos] = in;
            if (++inwritepos >= inputBufferLength_)
                inwritepos = 0;
            if (++sampsincefft >= hopActualSize_)
            {
                sampsincefft = 0;

                // Find the index of the starting sample in the buffer. When the buffer length
                // is equal to the transform size, this will be the current write position but
                // this code is more general for larger buffers.
                int inputBufferStartPosition = (inwritepos + inputBufferLength_
                                                - fftActualTransformSize_) % inputBufferLength_;

                // Window the buffer and copy it into the FFT input
                int inputBufferIndex = inputBufferStartPosition;
                for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
                {
                    // Set real part to windowed signal; imaginary part to 0.
                    fftTimeDomain_[fftBufferIndex][1] = 0.0;
                    if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
                        fftTimeDomain_[fftBufferIndex][0] = 0.0;
                    else
                        fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
                                                            * inputBufferData[inputBufferIndex];
                    inputBufferIndex++;
                    if(inputBufferIndex >= inputBufferLength_)
                        inputBufferIndex = 0;
                }

                // Perform the FFT on the windowed data, going into the frequency domain.
                // Result will be in fftFrequencyDomain_
                fftw_execute(fftForwardPlan_);

                // ********** PHASE VOCODER PROCESSING GOES HERE **************
                // This is the place where frequency-domain calculations are made
                // on the transformed signal. Put the result back into fftFrequencyDomain_
                // before transforming back.

                // (⊙_⊙)
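                // For each bin: dphi_ is the unwrapped phase increment since the previous
                // frame (the nominal advance omega_ plus the measured deviation wrapped to
                // +/-pi), i.e. an estimate of the bin's instantaneous frequency times the
                // hop size. psi_ accumulates that increment scaled by actualRatio_, so the
                // output phases advance as if the hop were actualRatio_ times longer.
                // Combined with resampling the grain by 1/ratio after the inverse FFT,
                // this transposes the pitch while keeping the output duration unchanged.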
                for (int i = 0; i < fftActualTransformSize_; i++)
                {
                    // (⊙_⊙) first turn the fft from real-imaginary to amplitude-phase
                    double amplitude = sqrt(fftFrequencyDomain_[i][0]*fftFrequencyDomain_[i][0]
                                            + fftFrequencyDomain_[i][1]*fftFrequencyDomain_[i][1]);
                    double phase = atan2(fftFrequencyDomain_[i][1], fftFrequencyDomain_[i][0]);

                    // (⊙_⊙) change the phase
                    dphi_[i][channel] = /*princArg(phase - phi0_[i][channel]);*/ omega_[i] + princArg(phase - phi0_[i][channel] - omega_[i]);
                    phi0_[i][channel] = phase;
                    psi_[i][channel] = princArg(psi_[i][channel] + dphi_[i][channel]*actualRatio_);

                    // (⊙_⊙) turn back to real-imaginary form
                    fftFrequencyDomain_[i][0] = amplitude*cos(psi_[i][channel]);
                    fftFrequencyDomain_[i][1] = amplitude*sin(psi_[i][channel]);
                }

                // (The loop above is where this plugin differs from the pass-through
                // phase vocoder example: the bin phases have already been modified
                // for pitch shifting before transforming back.)
                // ************************************************************

                // Perform the inverse FFT to get back to the time domain. Result will be
                // in fftTimeDomain_. If we've done it right (kept the frequency domain
                // symmetric), the time domain result should be strictly real, allowing us
                // to ignore the imaginary part.
                fftw_execute(fftBackwardPlan_);

                // (⊙_⊙) grain2 is the same as the IFFT frame except that it is one element longer
                for (int i = 0; i < fftActualTransformSize_; i++)
                    grain2[i] = fftTimeDomain_[i][0];
                // Fill the extra element so the interpolation below never reads an
                // uninitialised value when ix+1 == fftActualTransformSize_
                grain2[fftActualTransformSize_] = 0.0;

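                // The IFFT grain is resampled to length floor(N*(1/shift)), i.e. read at
                // roughly the pitch-shift ratio, which transposes its pitch; the phase
                // scaling above (by actualRatio_) keeps successive grains coherent when
                // they are overlap-added at the original hop size.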
                // (⊙_⊙) resampling using linear interpolation to get grain3
                for (int i = 0; i < floor(pitchActualShiftRec_*fftActualTransformSize_); i++)
                {
                    lx = floor(pitchActualShiftRec_*fftActualTransformSize_);
                    x = i*fftActualTransformSize_/lx;
                    ix = floor(x);
                    dx = x - (double)ix;
                    grain3[i] = grain2[ix]*(1.0 - dx) + grain2[ix+1]*dx;
                }

                // Add the result to the output buffer, starting at the current write position
                // (Output buffer will have been zeroed after reading the last time around)
                // Output needs to be scaled by the transform size to get back to original amplitude:
                // this is a property of how fftw is implemented. Scaling will also need to be adjusted
                // based on hop size to get the same output level (smaller hop size produces more overlap
                // and hence higher signal level)
                int outputBufferIndex = outwritepos;

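                // Overlap-add the resampled grain into the output, weighted by the
                // synthesis window (sized to match the resampled grain) and by
                // fftScaleFactor_, which compensates for FFTW's unnormalised inverse
                // transform and for the amount of window overlap.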
                // (⊙_⊙) Synthesizing
                //for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
                for(int fftBufferIndex = 0; fftBufferIndex < floor(pitchActualShiftRec_*fftActualTransformSize_); fftBufferIndex++)
                {
                    if (fftBufferIndex >= synthesisWindowBufferLength_) // Safety check, in case the synthesis window isn't ready
                        outputBufferData[outputBufferIndex] += 0;
                    else
                        outputBufferData[outputBufferIndex] += grain3[fftBufferIndex] * fftScaleFactor_
                                                               * synthesisWindowBuffer_[fftBufferIndex];

                    if(++outputBufferIndex >= outputBufferLength_)
                        outputBufferIndex = 0;
                }

                // Advance the write position within the buffer by the hop size
                outwritepos = (outwritepos + hopActualSize_) % outputBufferLength_;
            }
        }
    }

    // Having made a local copy of the state variables for each channel, now transfer the result
    // back to the main state variable so they will be preserved for the next call of processBlock()
    inputBufferWritePosition_ = inwritepos;
    outputBufferWritePosition_ = outwritepos;
    outputBufferReadPosition_ = outreadpos;
    samplesSinceLastFFT_ = sampsincefft;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = numInputChannels; i < numOutputChannels; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    fftSpinLock_.exit();
}

//==============================================================================
bool PVOCPitchShiftAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PVOCPitchShiftAudioProcessor::createEditor()
{
    return new PVOCPitchShiftAudioProcessorEditor (this);
}

//==============================================================================
void PVOCPitchShiftAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml("C4DMPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute("uiWidth", lastUIWidth_);
    xml.setAttribute("uiHeight", lastUIHeight_);
    xml.setAttribute("fftSize", fftSelectedSize_);
    xml.setAttribute("hopSize", hopSelectedSize_);
    xml.setAttribute("windowType", windowType_);
    xml.setAttribute("pitchShift", pitchSelectedShift_); // (⊙_⊙)

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary(xml, destData);
}

void PVOCPitchShiftAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // You should use this method to restore your parameters from this memory block,
    // whose contents will have been created by the getStateInformation() call.

    // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
    ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));

    if(xmlState != 0)
    {
        // make sure that it's actually our type of XML object..
        if(xmlState->hasTagName("C4DMPLUGINSETTINGS"))
        {
            // ok, now pull out our parameters..
            lastUIWidth_ = xmlState->getIntAttribute("uiWidth", lastUIWidth_);
            lastUIHeight_ = xmlState->getIntAttribute("uiHeight", lastUIHeight_);

            fftSelectedSize_ = (int)xmlState->getDoubleAttribute("fftSize", fftSelectedSize_);
            hopSelectedSize_ = (int)xmlState->getDoubleAttribute("hopSize", hopSelectedSize_);
            windowType_ = (int)xmlState->getDoubleAttribute("windowType", windowType_);
            // (⊙_⊙)
            pitchSelectedShift_ = (int)xmlState->getDoubleAttribute("pitchShift", pitchSelectedShift_);

            if(preparedToPlay_)
            {
                // Update settings if currently playing, else wait until prepareToPlay() called
                initFFT(fftSelectedSize_);
                initWindow(fftSelectedSize_, windowType_);
                initSynthesisWindow(floor(fftSelectedSize_*pitchActualShiftRec_), windowType_);
            }
        }
    }
}

//==============================================================================
// Initialise the FFT data structures for a given length transform
void PVOCPitchShiftAudioProcessor::initFFT(int length)
{
    if(fftInitialised_)
        deinitFFT();

    // Save the current length so we know how big our results are later
    fftActualTransformSize_ = length;

    // Here we allocate the complex-number buffers for the FFT. This uses
    // a convenient wrapper on the more general fftw_malloc()
    fftTimeDomain_ = fftw_alloc_complex(length);
    fftFrequencyDomain_ = fftw_alloc_complex(length);

    // FFTW_ESTIMATE doesn't necessarily produce the fastest executing code (FFTW_MEASURE
    // will get closer) but it carries a minimum startup cost. FFTW_MEASURE might stall for
    // several seconds which would be annoying in an audio plug-in context.
    fftForwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftTimeDomain_,
                                       fftFrequencyDomain_, FFTW_FORWARD, FFTW_ESTIMATE);
    fftBackwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftFrequencyDomain_,
                                        fftTimeDomain_, FFTW_BACKWARD, FFTW_ESTIMATE);

    // Allocate the buffer that the samples will be collected in
    inputBufferLength_ = fftActualTransformSize_;
    inputBuffer_.setSize(2, inputBufferLength_);
    inputBuffer_.clear();
    inputBufferWritePosition_ = 0;
    samplesSinceLastFFT_ = 0;

    // Allocate the output buffer to be twice the size of the FFT
    // This will be enough for all hop size cases
    outputBufferLength_ = 2*fftActualTransformSize_;
    outputBuffer_.setSize(2, outputBufferLength_);
    outputBuffer_.clear();
    outputBufferReadPosition_ = 0;

    updateHopSize();

    //(⊙_⊙)
    updatePitchShift();

    fftInitialised_ = true;
}

// Free the FFT data structures
void PVOCPitchShiftAudioProcessor::deinitFFT()
{
    if(!fftInitialised_)
        return;

    // Prevent this variable from changing while an audio callback is running.
    // Once it has changed, the next audio callback will find that it's not
    // initialised and will return silence instead of attempting to work with the
    // (invalid) FFT structures. This produces an audible glitch but no crash,
    // and is the simplest way to handle parameter changes in this example code.
    fftSpinLock_.enter();
    fftInitialised_ = false;
    fftSpinLock_.exit();

    fftw_destroy_plan(fftForwardPlan_);
    fftw_destroy_plan(fftBackwardPlan_);
    fftw_free(fftTimeDomain_);
    fftw_free(fftFrequencyDomain_);

    // Leave the input buffer in memory until the plugin is released
}

//==============================================================================
// Create a new window of a given length and type
void PVOCPitchShiftAudioProcessor::initWindow(int length, int windowType)
{
    if(windowBuffer_ != 0)
        deinitWindow();
    if(length == 0) // Sanity check
        return;

    // Allocate memory for the window
    windowBuffer_ = (double *)malloc(length * sizeof(double));

    // Write the length as a double here to simplify the code below (otherwise
    // typecasts would be wise)
    double windowLength = length;

    // Set values for the window, depending on its type
    for(int i = 0; i < length; i++)
    {
        // Window functions are typically defined to be symmetrical. This will cause a
        // problem in the overlap-add process: the windows instead need to be periodic
        // when arranged end-to-end. As a result we calculate the window of one sample
        // larger than usual, and drop the last sample. (This works as long as N is even.)
        // See Julius Smith, "Spectral Audio Signal Processing" for details.
        switch(windowType)
        {
            case kWindowBartlett:
                windowBuffer_[i] = (2.0/(windowLength + 2.0))*
                    (0.5*(windowLength + 2.0) - abs((double)i - 0.5*windowLength));
                break;
            case kWindowHann:
                windowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
                break;
            case kWindowHamming:
                windowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
                break;
            case kWindowRectangular:
            default:
                windowBuffer_[i] = 1.0;
                break;
        }
    }

    windowBufferLength_ = length;
    updateScaleFactor();
}

//==============================================================================
// Create a new synthesis window of a given length and type
void PVOCPitchShiftAudioProcessor::initSynthesisWindow(int length, int windowType)
{
    if(synthesisWindowBuffer_ != 0)
        deinitSynthesisWindow();
    if(length == 0) // Sanity check
        return;

    // Allocate memory for the window
    synthesisWindowBuffer_ = (double *)malloc(length * sizeof(double));

    // Write the length as a double here to simplify the code below (otherwise
    // typecasts would be wise)
    double windowLength = length;

    // Set values for the window, depending on its type
    for(int i = 0; i < length; i++)
    {
        // Window functions are typically defined to be symmetrical. This will cause a
        // problem in the overlap-add process: the windows instead need to be periodic
        // when arranged end-to-end. As a result we calculate the window of one sample
        // larger than usual, and drop the last sample. (This works as long as N is even.)
        // See Julius Smith, "Spectral Audio Signal Processing" for details.
        switch(windowType)
        {
            case kWindowBartlett:
                synthesisWindowBuffer_[i] = (2.0/(windowLength + 2.0))*
                    (0.5*(windowLength + 2.0) - abs((double)i - 0.5*windowLength));
                break;
            case kWindowHann:
                synthesisWindowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
                break;
            case kWindowHamming:
                synthesisWindowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
                break;
            case kWindowRectangular:
            default:
                synthesisWindowBuffer_[i] = 1.0;
                break;
        }
    }

    synthesisWindowBufferLength_ = length;
    updateScaleFactor();
}

// Free the window buffer
void PVOCPitchShiftAudioProcessor::deinitWindow()
{
    if(windowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    windowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(windowBuffer_);
    windowBuffer_ = 0;
}

// Free the synthesis window buffer
void PVOCPitchShiftAudioProcessor::deinitSynthesisWindow()
{
    if(synthesisWindowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    synthesisWindowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(synthesisWindowBuffer_);
    synthesisWindowBuffer_ = 0;
}

// Update the actual hop size depending on the window size and hop size settings
// Hop size is expressed as a fraction of a window in the parameters.
void PVOCPitchShiftAudioProcessor::updateHopSize()
{
    switch(hopSelectedSize_)
    {
        case kHopSize1Window:
            hopActualSize_ = fftActualTransformSize_;
            break;
        case kHopSize1_2Window:
            hopActualSize_ = fftActualTransformSize_ / 2;
            break;
        case kHopSize1_4Window:
            hopActualSize_ = fftActualTransformSize_ / 4;
            break;
        case kHopSize1_8Window:
            hopActualSize_ = fftActualTransformSize_ / 8;
            break;
    }

    // Update the factor by which samples are scaled to preserve unity gain
    updateScaleFactor();

    // Read pointer lags the write pointer to allow for FFT buffers to accumulate and
    // be processed. Total latency is sum of the FFT size and the hop size.
    outputBufferWritePosition_ = hopActualSize_ + fftActualTransformSize_;
}

// (⊙_⊙) Update the pitch shift
void PVOCPitchShiftAudioProcessor::updatePitchShift()
{
    switch(pitchSelectedShift_)
    {
        case kShift0:
            pitchActualShift_ = 1.0;
            break;
        case kShiftP1:
            pitchActualShift_ = pow(2.0, 1.0/12.0);
            break;
        case kShiftP2:
            pitchActualShift_ = pow(2.0, 2.0/12.0);
            break;
        case kShiftP3:
            pitchActualShift_ = pow(2.0, 3.0/12.0);
            break;
        case kShiftP4:
            pitchActualShift_ = pow(2.0, 4.0/12.0);
            break;
        case kShiftP5:
            pitchActualShift_ = pow(2.0, 5.0/12.0);
            break;
        case kShiftP6:
            pitchActualShift_ = pow(2.0, 6.0/12.0);
            break;
        case kShiftM1:
            pitchActualShift_ = pow(2.0, -1.0/12.0);
            break;
        case kShiftM2:
            pitchActualShift_ = pow(2.0, -2.0/12.0);
            break;
        case kShiftM3:
            pitchActualShift_ = pow(2.0, -3.0/12.0);
            break;
        case kShiftM4:
            pitchActualShift_ = pow(2.0, -4.0/12.0);
            break;
        case kShiftM5:
            pitchActualShift_ = pow(2.0, -5.0/12.0);
            break;
        case kShiftM6:
            pitchActualShift_ = pow(2.0, -6.0/12.0);
            break;
    }
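    // Quantise the transposition ratio so that ratio * hopActualSize_ corresponds to a
    // whole number of samples; pitchActualShiftRec_ (the reciprocal of the shift) is
    // used elsewhere to size the resampled grain and the synthesis window.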
    actualRatio_ = round(pitchActualShift_*hopActualSize_)/hopActualSize_;
    pitchActualShiftRec_ = 1/pitchActualShift_;
}

// (⊙_⊙) Principal phase argument: wrap phaseIn into the range (-pi, pi].
// This is the equivalent of MATLAB's mod(phasein+pi, -2*pi)+pi. MATLAB's mod takes
// the sign of the divisor, whereas C's fmod takes the sign of the dividend, so the
// negative case needs the mirrored expression below.
double PVOCPitchShiftAudioProcessor::princArg(double phaseIn)
{
    if (phaseIn >= 0)
        return fmod(phaseIn + M_PI, 2*M_PI) - M_PI;
    else
        return fmod(phaseIn - M_PI, 2*M_PI) + M_PI;
}

// Update the factor by which each output sample is scaled. This needs to be updated
// every time the FFT size, hop size or window type changes.
void PVOCPitchShiftAudioProcessor::updateScaleFactor()
{
    // The gain needs to be normalised by the sum of the window, which implicitly
    // accounts for the length of the transform and the window type. From there
    // we also update based on hop size: smaller hop means more overlap means the
    // overall gain should be reduced.
    double windowSum = 0.0;

    for(int i = 0; i < windowBufferLength_; i++)
    {
        windowSum += windowBuffer_[i];
    }

    if(windowSum == 0.0)
        fftScaleFactor_ = 0.0; // Catch invalid cases and mute output
    else
    {
        switch(hopSelectedSize_)
        {
            case kHopSize1Window: // 0dB
                fftScaleFactor_ = 1.0/(double)windowSum;
                break;
            case kHopSize1_2Window: // -6dB
                fftScaleFactor_ = 0.5/(double)windowSum;
                break;
            case kHopSize1_4Window: // -12dB
                fftScaleFactor_ = 0.25/(double)windowSum;
                break;
            case kHopSize1_8Window: // -18dB
                fftScaleFactor_ = 0.125/(double)windowSum;
                break;
        }
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PVOCPitchShiftAudioProcessor();
}