y@0
|
1 /*
|
y@0
|
2 ==============================================================================
|
y@0
|
3
|
y@0
|
4 This file was auto-generated!
|
y@0
|
5
|
y@0
|
6 It contains the basic startup code for a Juce application.
|
y@0
|
7
|
y@0
|
8 ==============================================================================
|
y@0
|
9 */
|
y@0
|
10
|
y@0
|
#include "PluginProcessor.h"
#include "PluginEditor.h"

#include <cmath>
|
y@0
|
13
|
y@0
|
14
|
y@0
|
15 //==============================================================================
|
y@0
|
16
|
y@0
|
// M_PI is not part of standard C++; some toolchains provide it via <cmath>
// and some do not. Guard the definition so we neither clash with an existing
// macro nor trigger a redefinition warning/error.
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
|
y@0
|
18
|
y@0
|
// Construct the processor. The stereo input/output buffers start at a
// placeholder length of 1; their real sizes are set in initFFT(), which is
// called from prepareToPlay().
ADRessAudioProcessor::ADRessAudioProcessor() : inputBuffer_(2, 1), outputBuffer_(2, 1)
{
    // Set default values:
    fftSelectedSize_ = 2048;                // requested FFT length in samples
    hopSelectedSize_ = kHopSize1_8Window;   // hop = 1/8 of a window by default
    windowType_ = kWindowHann;

    // FFT machinery is initialised lazily by initFFT()/initWindow().
    fftInitialised_ = false;
    fftActualTransformSize_ = 0;
    inputBufferLength_ = 1;
    outputBufferLength_ = 1;
    inputBufferWritePosition_ = outputBufferWritePosition_ = outputBufferReadPosition_ = 0;
    samplesSinceLastFFT_ = 0;
    windowBuffer_ = 0;        // allocated in initWindow()
    windowBufferLength_ = 0;
    preparedToPlay_ = false;
    fftScaleFactor_ = 0.0;

    // ADRess defaults: the stereo field is indexed 0..2*beta_ (see
    // changeParams()), so azimuth_ == beta_ starts at the centre.
    beta_ = 5;
    width_ = 0;
    azimuth_ = beta_;

    // Azimuth-plane index ranges for the left/right planes; recomputed by
    // changeParams() whenever azimuth_ or width_ changes.
    indMinL_ = beta_;
    indMaxL_ = beta_;
    indMinR_ = beta_;
    indMaxR_ = beta_;
    computeR_ = true;
    computeL_ = true;
    invGain_ = 2;
    invBeta_ = 1/(float)beta_;
    i1 = std::complex<double>(0,1);  // imaginary unit, used during resynthesis

    // Last known editor size, persisted via get/setStateInformation().
    lastUIWidth_ = 360;
    lastUIHeight_ = 400;
}
|
y@0
|
55
|
y@0
|
// Destructor: tear down FFT and window resources.
ADRessAudioProcessor::~ADRessAudioProcessor()
{
    // Release FFT resources if allocated. This should be handled by
    // releaseResources() but in the event it doesn't happen, this avoids
    // a leak. Harmless to call it twice.
    deinitFFT();
    deinitWindow();
}
|
y@0
|
64
|
y@0
|
65 //==============================================================================
|
y@0
|
// Returns the plug-in name as configured in the JUCE project settings.
const String ADRessAudioProcessor::getName() const
{
    return JucePlugin_Name;
}
|
y@0
|
70
|
y@0
|
// Number of host-automatable parameters (see the kXXXParam enumeration).
int ADRessAudioProcessor::getNumParameters()
{
    return kNumParameters;
}
|
y@0
|
75
|
y@0
|
76 float ADRessAudioProcessor::getParameter (int index)
|
y@0
|
77 {
|
y@0
|
78 // This method will be called by the host, probably on the audio thread, so
|
y@0
|
79 // it's absolutely time-critical. Don't use critical sections or anything
|
y@0
|
80 // UI-related, or anything at all that may block in any way!
|
y@0
|
81 switch (index)
|
y@0
|
82 {
|
y@0
|
83 case kFFTSizeParam: return (float)fftSelectedSize_;
|
y@0
|
84 case kHopSizeParam: return (float)hopSelectedSize_;
|
y@0
|
85 case kWindowTypeParam: return (float)windowType_;
|
y@0
|
86 case kWidthParam: return (float)width_;
|
y@0
|
87 case kAzimuthParam: return (float)azimuth_;
|
y@0
|
88
|
y@0
|
89 default: return 0.0f;
|
y@0
|
90 }
|
y@0
|
91 }
|
y@0
|
92
|
y@0
|
// Apply a parameter change from the host. FFT/window/hop changes rebuild the
// relevant structures immediately when playing; otherwise the new settings
// take effect when prepareToPlay() is next called.
void ADRessAudioProcessor::setParameter (int index, float newValue)
{
    // This method will be called by the host, probably on the audio thread, so
    // it's absolutely time-critical. Don't use critical sections or anything
    // UI-related, or anything at all that may block in any way!
    switch (index)
    {
        case kFFTSizeParam:
            if((int)newValue != fftSelectedSize_)
            {
                fftSelectedSize_ = (int)newValue;
                if(preparedToPlay_)
                {
                    // Update settings if currently playing, else wait until prepareToPlay() called
                    initFFT(fftSelectedSize_);
                    initWindow(fftSelectedSize_, windowType_);
                }
            }
            break;
        case kHopSizeParam:
            hopSelectedSize_ = (int)newValue;
            if(preparedToPlay_)
                updateHopSize();
            break;
        case kWindowTypeParam:
            // Recalculate window if needed
            if((int)newValue != windowType_)
            {
                windowType_ = (int)newValue;
                if(preparedToPlay_)
                    initWindow(fftActualTransformSize_, (int)newValue);
            }
            break;

        case kWidthParam:
            // Azimuth-window width; recompute the plane index ranges.
            width_ = (int)newValue;
            changeParams();
            break;

        case kAzimuthParam:
            // Azimuth centre position; recompute the plane index ranges.
            azimuth_ = (int)newValue;
            changeParams();
            break;

        default:
            break;
    }
}
|
y@0
|
141
|
y@0
|
// Recompute the left/right azimuth-plane index ranges from azimuth_ and
// width_. The stereo field is indexed 0..2*beta_ with beta_ at the centre;
// positions above beta_ map (mirrored) onto the right plane, positions below
// map onto the left plane.
void ADRessAudioProcessor::changeParams()
{
    // Clamp the selected azimuth window [azimuth_-width_, azimuth_+width_]
    // to the valid range [0, 2*beta_].
    int maxA = std::min(azimuth_ + width_,2*beta_);
    int minA = std::max(azimuth_ - width_,0);
    if (maxA >= beta_)
    {
        computeR_ = true;
        indMinR_ = 2*beta_ - maxA;  // mirror into right-plane coordinates
        if (minA >= beta_)
        {
            indMaxR_ = 2*beta_ - minA;
        } else {
            indMaxR_ = beta_;
        }
    } else {
        // NOTE(review): indMinR_/indMaxR_ keep their previous values here,
        // and processBlock() iterates indMinR_..indMaxR_ unconditionally —
        // confirm stale indices cannot leak into the resynthesis sum.
        computeR_ = false;
    }

    if (minA <= beta_)
    {
        computeL_ = true;
        indMinL_ = minA;
        if (maxA <= beta_)
        {
            indMaxL_ = maxA;
        } else {
            indMaxL_ = beta_;
        }
    } else {
        // NOTE(review): same staleness concern as the right plane above.
        computeL_ = false;
    }

    // Gain compensation based on how many azimuth planes get summed.
    invGain_ = (indMaxR_ - indMinR_ + indMaxL_ - indMinL_);
    if (computeR_)
        invGain_++;
    if (computeL_)
        invGain_++;
    // NOTE(review): the computed gain is immediately overridden to 1, making
    // the calculation above dead code — confirm whether this is intentional.
    invGain_ = 1;//invGain_;

}
|
y@0
|
182
|
y@0
|
183
|
y@0
|
// Human-readable name for each parameter, shown by the host.
const String ADRessAudioProcessor::getParameterName (int index)
{
    switch (index)
    {
        case kFFTSizeParam:     return "FFT size";
        case kHopSizeParam:     return "hop size";
        case kWindowTypeParam:  return "window type";
        // NOTE(review): the width parameter is labelled "volume" here (and
        // stored under a "volume" attribute in getStateInformation()) —
        // confirm whether "width" was intended.
        case kWidthParam:       return "volume";
        case kAzimuthParam:     return "azimuth";

        default:                break;
    }

    return String::empty;
}
|
y@0
|
199
|
y@0
|
// Textual form of a parameter's current value (two decimal places).
const String ADRessAudioProcessor::getParameterText (int index)
{
    return String (getParameter (index), 2);
}
|
y@0
|
204
|
y@0
|
205 const String ADRessAudioProcessor::getInputChannelName (int channelIndex) const
|
y@0
|
206 {
|
y@0
|
207 return String (channelIndex + 1);
|
y@0
|
208 }
|
y@0
|
209
|
y@0
|
210 const String ADRessAudioProcessor::getOutputChannelName (int channelIndex) const
|
y@0
|
211 {
|
y@0
|
212 return String (channelIndex + 1);
|
y@0
|
213 }
|
y@0
|
214
|
y@0
|
// All input channels are treated as stereo pairs.
bool ADRessAudioProcessor::isInputChannelStereoPair (int index) const
{
    return true;
}
|
y@0
|
219
|
y@0
|
// All output channels are treated as stereo pairs.
bool ADRessAudioProcessor::isOutputChannelStereoPair (int index) const
{
    return true;
}
|
y@0
|
224
|
y@0
|
// Reports whether silent input yields silent output, as configured by the
// JUCE project's preprocessor flag.
bool ADRessAudioProcessor::silenceInProducesSilenceOut() const
{
#if JucePlugin_SilenceInProducesSilenceOut
    return true;
#else
    return false;
#endif
}
|
y@0
|
233
|
y@0
|
// Whether the plug-in accepts MIDI input (set by the project configuration).
bool ADRessAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}
|
y@0
|
242
|
y@0
|
// Whether the plug-in produces MIDI output (set by the project configuration).
bool ADRessAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}
|
y@0
|
251
|
y@0
|
// This effect has no reverb-style tail.
double ADRessAudioProcessor::getTailLengthSeconds() const
{
    return 0;
}
|
y@0
|
256
|
y@0
|
// No preset programs are provided.
int ADRessAudioProcessor::getNumPrograms()
{
    return 0;
}
|
y@0
|
261
|
y@0
|
// No programs: the current program index is always 0.
int ADRessAudioProcessor::getCurrentProgram()
{
    return 0;
}
|
y@0
|
266
|
y@0
|
// No programs: selecting one is a no-op.
void ADRessAudioProcessor::setCurrentProgram (int index)
{
}
|
y@0
|
270
|
y@0
|
// No programs: every program name is empty.
const String ADRessAudioProcessor::getProgramName (int index)
{
    return String::empty;
}
|
y@0
|
275
|
y@0
|
// No programs: renaming is a no-op.
void ADRessAudioProcessor::changeProgramName (int index, const String& newName)
{
}
|
y@0
|
279
|
y@0
|
280 //==============================================================================
|
y@0
|
// Called by the host before playback starts: build the FFT structures and
// the analysis window for the currently selected sizes.
void ADRessAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..

    initFFT(fftSelectedSize_);
    initWindow(fftSelectedSize_, windowType_);
    preparedToPlay_ = true;  // allows setParameter() to rebuild live
}
|
y@0
|
290
|
y@0
|
// Called by the host when playback stops: free the FFT and window buffers.
void ADRessAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.

    deinitFFT();
    deinitWindow();
    preparedToPlay_ = false;
}
|
y@0
|
300
|
y@0
|
301
|
y@0
|
302
|
y@0
|
303
|
y@0
|
304
|
y@0
|
// Main audio callback. Implements an overlap-add STFT pipeline: samples are
// collected into circular input buffers; every hopActualSize_ samples both
// channels are windowed and transformed with FFTW, the ADRess azimuth
// analysis/resynthesis is applied in the frequency domain, and the inverse
// transform is overlap-added into the output buffers, which are read back
// sample-by-sample with a latency of one FFT size plus one hop.
void ADRessAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Helpful information about this block of samples:
    const int numInputChannels = getNumInputChannels();     // How many input channels for our effect?
    const int numOutputChannels = getNumOutputChannels();   // How many output channels for our effect?
    const int numSamples = buffer.getNumSamples();          // How many samples in the buffer for this block?

    int channel, inwritepos, sampsincefft;
    int outreadpos, outwritepos;

    // Grab the lock that prevents the FFT settings from changing
    fftSpinLock_.enter();

    // Check that we're initialised and ready to go. If not, set output to 0
    if(!fftInitialised_)
    {
        for (channel = 0; channel < numOutputChannels; ++channel)
        {
            buffer.clear (channel, 0, buffer.getNumSamples());
        }

        fftSpinLock_.exit();
        return;
    }

    // Go through each channel of audio that's passed in. Collect the samples in the input
    // buffer. When we've reached the next hop interval, calculate the FFT.

    // channelDataL is an array of length numSamples which contains the audio for one channel
    // NOTE(review): channel 1 is accessed unconditionally — confirm the host
    // always supplies at least two channels; mono input would read invalid data.
    float* channelDataL = buffer.getSampleData(0);
    float* channelDataR = buffer.getSampleData(1);

    // inputBufferDataL is the circular buffer for collecting input samples for the FFT
    float* inputBufferDataL = inputBuffer_.getSampleData(0);
    float* inputBufferDataR = inputBuffer_.getSampleData(1);

    float* outputBufferDataL = outputBuffer_.getSampleData(0);
    float* outputBufferDataR = outputBuffer_.getSampleData(1);

    // State variables need to be temporarily cached for each channel. We don't want the
    // operations on one channel to affect the identical behaviour of the next channel
    inwritepos = inputBufferWritePosition_;
    outwritepos = outputBufferWritePosition_;
    outreadpos = outputBufferReadPosition_;
    sampsincefft = samplesSinceLastFFT_;

    for (int i = 0; i < numSamples; ++i)
    {
        const float inL = channelDataL[i];
        const float inR = channelDataR[i];

        // Store the next buffered sample in the output. Do this first before anything
        // changes the output buffer-- we will have at least one FFT size worth of data
        // stored and ready to go. Set the result to 0 when finished in preparation for the
        // next overlap/add procedure.

        channelDataL[i] = outputBufferDataL[outreadpos];
        channelDataR[i] = outputBufferDataR[outreadpos];
        outputBufferDataL[outreadpos] = 0.0;
        outputBufferDataR[outreadpos] = 0.0;

        if(++outreadpos >= outputBufferLength_)
            outreadpos = 0;

        // Store the current sample in the input buffer, incrementing the write pointer. Also
        // increment how many samples we've stored since the last transform. If it reaches the
        // hop size, perform an FFT and any frequency-domain processing.
        inputBufferDataL[inwritepos] = inL;
        inputBufferDataR[inwritepos] = inR;
        if (++inwritepos >= inputBufferLength_)
            inwritepos = 0;
        if (++sampsincefft >= hopActualSize_)
        {
            sampsincefft = 0;

            // Find the index of the starting sample in the buffer. When the buffer length
            // is equal to the transform size, this will be the current write position but
            // this code is more general for larger buffers.
            int inputBufferStartPosition = (inwritepos + inputBufferLength_
                                            - fftActualTransformSize_) % inputBufferLength_;

            // Window the buffer and copy it into the FFT input
            int inputBufferIndex = inputBufferStartPosition;
            for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
            {
                // Set real part to windowed signal; imaginary part to 0.
                fftTimeDomain_[fftBufferIndex][1] = 0.0;
                if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
                    fftTimeDomain_[fftBufferIndex][0] = 0.0;
                else
                    fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
                        * inputBufferDataL[inputBufferIndex];
                inputBufferIndex++;
                if(inputBufferIndex >= inputBufferLength_)
                    inputBufferIndex = 0;
            }

            // Perform the FFT on the windowed data, going into the frequency domain.Result will be in fftFrequencyDomain_
            fftw_execute(fftForwardPlan_);

            // ********** PHASE VOCODER PROCESSING GOES HERE **************
            // This is the place where frequency-domain calculations are made on the transformed signal.
            //Put the result back into fftFrequencyDomain_ before transforming back.

            // Save the left channel's spectrum (bins 0..N/2 inclusive).
            for (int ii = 0; ii < fftActualTransformSize_/2+1; ii++)
            {
                realL_[ii] = fftFrequencyDomain_[ii][0];
                imagL_[ii] = fftFrequencyDomain_[ii][1];
            }

            // Window the right channel into the same FFT input and transform.
            inputBufferIndex = inputBufferStartPosition;
            for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
            {
                if(fftBufferIndex >= windowBufferLength_) // Safety check, in case window isn't ready
                    fftTimeDomain_[fftBufferIndex][0] = 0.0;
                else
                    fftTimeDomain_[fftBufferIndex][0] = windowBuffer_[fftBufferIndex]
                        * inputBufferDataR[inputBufferIndex];
                inputBufferIndex++;
                if(inputBufferIndex >= inputBufferLength_)
                    inputBufferIndex = 0;
            }
            fftw_execute(fftForwardPlan_);

            // Save the right channel's spectrum (bins 0..N/2 inclusive).
            for (int ii = 0; ii < fftActualTransformSize_/2+1; ii++)
            {
                realR_[ii] = fftFrequencyDomain_[ii][0];
                imagR_[ii] = fftFrequencyDomain_[ii][1];
            }

            // ADRess analysis: for each bin, scan the azimuth planes j=0..beta_
            // and place the magnitude difference (max - min) at the plane where
            // the cancellation is deepest (the source's azimuth position).
            double minV, maxV,newVal;
            std::complex<double> fftTemp;
            int indMin;
            for (int ii = 0; ii <fftActualTransformSize_/2+1; ii++)
            {
                if (computeR_)
                {
                    //Right plane
                    minV = realL_[ii]*realL_[ii] + imagL_[ii]*imagL_[ii];
                    maxV = minV;
                    azr_[ii][0] = 0;
                    indMin = 0;
                    for (int j = 1; j <= beta_; j++)
                    {
                        // Squared magnitude of L - (j/beta)*R: cancels sources
                        // panned at intensity ratio j/beta.
                        newVal = (realL_[ii] - j*invBeta_*realR_[ii])*(realL_[ii] - j*invBeta_*realR_[ii]) + (imagL_[ii] - j*invBeta_*imagR_[ii])*(imagL_[ii] - j*invBeta_*imagR_[ii]);
                        azr_[ii][j] = 0;

                        if (newVal>maxV)
                        {
                            maxV = newVal;
                        }
                        else if (newVal<minV)
                        {
                            minV = newVal;
                            indMin = j;
                        }
                    }
                    azr_[ii][indMin] = sqrt(maxV) - sqrt(minV);
                }

                if(computeL_)
                {
                    ////Left plane
                    minV = realR_[ii]*realR_[ii] + imagR_[ii]*imagR_[ii];
                    maxV = minV;
                    indMin = 0;
                    azl_[ii][0] = 0;
                    for (int j = 1; j <= beta_; j++)
                    {
                        newVal = (realR_[ii] - j*invBeta_*realL_[ii])*(realR_[ii] - j*invBeta_*realL_[ii]) + (imagR_[ii] - j*invBeta_*imagL_[ii])*(imagR_[ii] - j*invBeta_*imagL_[ii]);
                        // NOTE(review): this zeroes azr_, not azl_ — confirm
                        // whether azl_[ii][j] = 0 was intended here.
                        azr_[ii][j] = 0;
                        if (newVal>maxV)
                        {
                            maxV = newVal;
                        }
                        else if (newVal<minV)
                        {
                            minV = newVal;
                            indMin = j;
                        }
                    }
                    azl_[ii][indMin] = sqrt(maxV) - sqrt(minV);
                }
            }

            // Resynthesis: sum the magnitudes of the selected azimuth planes
            // into a single complex spectrum.
            // NOTE(review): the phase term uses the raw imaginary part
            // (exp(i*imag)) rather than atan2(imag, real) — confirm this is
            // the intended phase reconstruction for this ADRess variant.
            for (int ii = 0; ii < fftActualTransformSize_/2+1; ii++)
            {
                fftTemp = 0;
                for ( int j = indMinR_; j <= indMaxR_; j++)
                {
                    fftTemp += azr_[ii][j]*exp(i1*imagR_[ii]);
                }
                for ( int j = indMinL_; j <= indMaxL_; j++)
                {
                    fftTemp += azl_[ii][j]*exp(i1*imagL_[ii]);
                }
                // fftTemp *= invGain_;

                fftFrequencyDomain_[ii][0] = real(fftTemp);
                fftFrequencyDomain_[ii][1] = imag(fftTemp);
            }

            // Rebuild the upper half of the spectrum by conjugate symmetry so
            // the inverse transform yields a real signal.
            for (int ii = 1; ii < fftActualTransformSize_/2; ii++)
            {
                fftFrequencyDomain_[fftActualTransformSize_-ii][0] = fftFrequencyDomain_[ii][0];
                fftFrequencyDomain_[fftActualTransformSize_-ii][1] = -fftFrequencyDomain_[ii][1];
            }
            fftw_execute(fftBackwardPlan_);

            // Overlap-add the scaled inverse transform into both output
            // channels (the separated source is written as mono).
            float audioTemp;
            int outputBufferIndex = outwritepos;
            for(int fftBufferIndex = 0; fftBufferIndex < fftActualTransformSize_; fftBufferIndex++)
            {
                audioTemp = (float)fftTimeDomain_[fftBufferIndex][0] * fftScaleFactor_;
                outputBufferDataR[outputBufferIndex] += audioTemp;
                outputBufferDataL[outputBufferIndex] += audioTemp;
                if(++outputBufferIndex >= outputBufferLength_)
                    outputBufferIndex = 0;
            }

            // Add the result to the output buffer, starting at the current write position
            // (Output buffer will have been zeroed after reading the last time around)
            // Output needs to be scaled by the transform size to get back to original amplitude:
            // this is a property of how fftw is implemented. Scaling will also need to be adjusted
            // based on hop size to get the same output level (smaller hop size produces more overlap
            // and hence higher signal level)

            // Advance the write position within the buffer by the hop size
            outwritepos = (outwritepos + hopActualSize_) % outputBufferLength_;
        }
    }

    // Having made a local copy of the state variables for each channel, now transfer the result
    // back to the main state variable so they will be preserved for the next call of processBlock()
    inputBufferWritePosition_ = inwritepos;
    outputBufferWritePosition_ = outwritepos;
    outputBufferReadPosition_ = outreadpos;
    samplesSinceLastFFT_ = sampsincefft;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = numInputChannels; i < numOutputChannels; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    fftSpinLock_.exit();
}
|
y@0
|
567
|
y@0
|
568 //==============================================================================
|
y@0
|
// The plug-in supplies its own editor component (see createEditor()).
bool ADRessAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}
|
y@0
|
573
|
y@0
|
// Create a new editor window; ownership passes to the host/JUCE wrapper.
AudioProcessorEditor* ADRessAudioProcessor::createEditor()
{
    return new ADRessAudioProcessorEditor (this);
}
|
y@0
|
578
|
y@0
|
579 //==============================================================================
|
y@0
|
// Serialise the current settings (UI size and all parameters) into the
// host-provided memory block as an XML document.
void ADRessAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml("C4DMPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute("uiWidth", lastUIWidth_);
    xml.setAttribute("uiHeight", lastUIHeight_);
    xml.setAttribute("fftSize", fftSelectedSize_);
    xml.setAttribute("hopSize", hopSelectedSize_);
    xml.setAttribute("windowType", windowType_);
    // NOTE(review): width_ is persisted under the key "volume" (matched by
    // setStateInformation()) — keep the key stable for saved-session compat.
    xml.setAttribute("volume", width_);
    xml.setAttribute("azimuth", azimuth_);

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary(xml, destData);
}
|
y@0
|
601
|
y@0
|
// Restore settings previously written by getStateInformation(). Invalid or
// foreign blobs are ignored. If playback is active, the FFT and window are
// rebuilt immediately.
void ADRessAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // You should use this method to restore your parameters from this memory block,
    // whose contents will have been created by the getStateInformation() call.

    // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
    ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));

    if(xmlState != 0)
    {
        // make sure that it's actually our type of XML object..
        if(xmlState->hasTagName("C4DMPLUGINSETTINGS"))
        {
            // ok, now pull out our parameters..
            lastUIWidth_ = xmlState->getIntAttribute("uiWidth", lastUIWidth_);
            lastUIHeight_ = xmlState->getIntAttribute("uiHeight", lastUIHeight_);

            fftSelectedSize_ = (int)xmlState->getDoubleAttribute("fftSize", fftSelectedSize_);
            hopSelectedSize_ = (int)xmlState->getDoubleAttribute("hopSize", hopSelectedSize_);
            windowType_ = (int)xmlState->getDoubleAttribute("windowType", windowType_);
            // "volume" is the persisted key for width_ (see getStateInformation()).
            width_ = (int)xmlState->getDoubleAttribute("volume", width_);
            azimuth_ = (int)xmlState->getDoubleAttribute("azimuth", azimuth_);

            if(preparedToPlay_)
            {
                // Update settings if currently playing, else wait until prepareToPlay() called
                initFFT(fftSelectedSize_);
                initWindow(fftSelectedSize_, windowType_);
            }
        }
    }
}
|
y@0
|
635
|
y@0
|
636 //==============================================================================
|
y@0
|
637 // Initialise the FFT data structures for a given length transform
|
y@0
|
// Initialise the FFT data structures for a given length transform: allocate
// FFTW buffers and plans, size and clear the circular input/output buffers,
// and reset all read/write positions.
void ADRessAudioProcessor::initFFT(int length)
{
    if(fftInitialised_)
        deinitFFT();

    // Save the current length so we know how big our results are later
    fftActualTransformSize_ = length;

    // Here we allocate the complex-number buffers for the FFT. This uses
    // a convenient wrapper on the more general fftw_malloc()
    fftTimeDomain_ = fftw_alloc_complex(length);
    fftFrequencyDomain_ = fftw_alloc_complex(length);

    // FFTW_ESTIMATE doesn't necessarily produce the fastest executing code (FFTW_MEASURE
    // will get closer) but it carries a minimum startup cost. FFTW_MEASURE might stall for
    // several seconds which would be annoying in an audio plug-in context.
    fftForwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftTimeDomain_,
                                       fftFrequencyDomain_, FFTW_FORWARD, FFTW_ESTIMATE);
    fftBackwardPlan_ = fftw_plan_dft_1d(fftActualTransformSize_, fftFrequencyDomain_,
                                        fftTimeDomain_, FFTW_BACKWARD, FFTW_ESTIMATE);

    // Allocate the buffer that the samples will be collected in
    inputBufferLength_ = fftActualTransformSize_;
    inputBuffer_.setSize(2, inputBufferLength_);
    inputBuffer_.clear();
    inputBufferWritePosition_ = 0;
    samplesSinceLastFFT_ = 0;

    // Allocate the output buffer to be twice the size of the FFT
    // This will be enough for all hop size cases
    outputBufferLength_ = 2*fftActualTransformSize_;
    outputBuffer_.setSize(2, outputBufferLength_);
    outputBuffer_.clear();
    outputBufferReadPosition_ = 0;

    // Sets hopActualSize_, the scale factor, and the output write position.
    updateHopSize();

    fftInitialised_ = true;
}
|
y@0
|
677
|
y@0
|
678 // Free the FFT data structures
|
y@0
|
// Free the FFT data structures (plans and complex buffers). Safe to call
// when nothing is allocated: it returns early if not initialised.
void ADRessAudioProcessor::deinitFFT()
{
    if(!fftInitialised_)
        return;

    // Prevent this variable from changing while an audio callback is running.
    // Once it has changed, the next audio callback will find that it's not
    // initialised and will return silence instead of attempting to work with the
    // (invalid) FFT structures. This produces an audible glitch but no crash,
    // and is the simplest way to handle parameter changes in this example code.
    fftSpinLock_.enter();
    fftInitialised_ = false;
    fftSpinLock_.exit();

    fftw_destroy_plan(fftForwardPlan_);
    fftw_destroy_plan(fftBackwardPlan_);
    fftw_free(fftTimeDomain_);
    fftw_free(fftFrequencyDomain_);

    // Leave the input buffer in memory until the plugin is released
}
|
y@0
|
700
|
y@0
|
701 //==============================================================================
|
y@0
|
702 // Create a new window of a given length and type
|
y@0
|
703 void ADRessAudioProcessor::initWindow(int length, int windowType)
|
y@0
|
704 {
|
y@0
|
705 if(windowBuffer_ != 0)
|
y@0
|
706 deinitWindow();
|
y@0
|
707 if(length == 0) // Sanity check
|
y@0
|
708 return;
|
y@0
|
709
|
y@0
|
710 // Allocate memory for the window
|
y@0
|
711 windowBuffer_ = (double *)malloc(length * sizeof(double));
|
y@0
|
712
|
y@0
|
713 // Write the length as a double here to simplify the code below (otherwise
|
y@0
|
714 // typecasts would be wise)
|
y@0
|
715 double windowLength = length;
|
y@0
|
716
|
y@0
|
717 // Set values for the window, depending on its type
|
y@0
|
718 for(int i = 0; i < length; i++)
|
y@0
|
719 {
|
y@0
|
720 // Window functions are typically defined to be symmetrical. This will cause a
|
y@0
|
721 // problem in the overlap-add process: the windows instead need to be periodic
|
y@0
|
722 // when arranged end-to-end. As a result we calculate the window of one sample
|
y@0
|
723 // larger than usual, and drop the last sample. (This works as long as N is even.)
|
y@0
|
724 // See Julius Smith, "Spectral Audio Signal Processing" for details.
|
y@0
|
725 switch(windowType)
|
y@0
|
726 {
|
y@0
|
727 case kWindowBartlett:
|
y@0
|
728 windowBuffer_[i] = (2.0/(windowLength + 2.0))*
|
y@0
|
729 (0.5*(windowLength + 2.0) - abs((double)i - 0.5*windowLength));
|
y@0
|
730 break;
|
y@0
|
731 case kWindowHann:
|
y@0
|
732 windowBuffer_[i] = 0.5*(1.0 - cos(2.0*M_PI*(double)i/windowLength));
|
y@0
|
733 break;
|
y@0
|
734 case kWindowHamming:
|
y@0
|
735 windowBuffer_[i] = 0.54 - 0.46*cos(2.0*M_PI*(double)i/windowLength);
|
y@0
|
736 break;
|
y@0
|
737 case kWindowRectangular:
|
y@0
|
738 default:
|
y@0
|
739 windowBuffer_[i] = 1.0;
|
y@0
|
740 break;
|
y@0
|
741 }
|
y@0
|
742 }
|
y@0
|
743
|
y@0
|
744 windowBufferLength_ = length;
|
y@0
|
745 updateScaleFactor();
|
y@0
|
746 }
|
y@0
|
747
|
y@0
|
748 // Free the window buffer
|
y@0
|
// Free the window buffer. Safe to call when no window is allocated.
void ADRessAudioProcessor::deinitWindow()
{
    if(windowBuffer_ == 0)
        return;

    // Delay clearing the window until the audio thread is not running
    // to avoid a crash if the code tries to access an invalid window
    fftSpinLock_.enter();
    windowBufferLength_ = 0;
    fftSpinLock_.exit();

    free(windowBuffer_);
    windowBuffer_ = 0;  // mark as unallocated for the next initWindow() call
}
|
y@0
|
763
|
y@0
|
764 // Update the actual hop size depending on the window size and hop size settings
|
y@0
|
765 // Hop size is expressed as a fraction of a window in the parameters.
|
y@0
|
// Update the actual hop size depending on the window size and hop size settings
// Hop size is expressed as a fraction of a window in the parameters.
void ADRessAudioProcessor::updateHopSize()
{
    // NOTE(review): no default case — an unrecognised hopSelectedSize_ would
    // leave hopActualSize_ at its previous value; confirm the enum is closed.
    switch(hopSelectedSize_)
    {
        case kHopSize1Window:
            hopActualSize_ = fftActualTransformSize_;
            break;
        case kHopSize1_2Window:
            hopActualSize_ = fftActualTransformSize_ / 2;
            break;
        case kHopSize1_4Window:
            hopActualSize_ = fftActualTransformSize_ / 4;
            break;
        case kHopSize1_8Window:
            hopActualSize_ = fftActualTransformSize_ / 8;
            break;
    }

    // Update the factor by which samples are scaled to preserve unity gain
    updateScaleFactor();

    // Read pointer lags the write pointer to allow for FFT buffers to accumulate and
    // be processed. Total latency is sum of the FFT size and the hop size.
    outputBufferWritePosition_ = hopActualSize_ + fftActualTransformSize_;
}
|
y@0
|
791
|
y@0
|
792 // Update the factor by which each output sample is scaled. This needs to update
|
y@0
|
793 // every time FFT size, hop size, and window type are changed.
|
y@0
|
// Update the factor by which each output sample is scaled. This needs to update
// every time FFT size, hop size, and window type are changed.
void ADRessAudioProcessor::updateScaleFactor()
{
    // The gain needs to be normalised by the sum of the window, which implicitly
    // accounts for the length of the transform and the window type. From there
    // we also update based on hop size: smaller hop means more overlap means the
    // overall gain should be reduced.
    double windowSum = 0.0;

    for(int i = 0; i < windowBufferLength_; i++)
    {
        windowSum += windowBuffer_[i];
    }

    if(windowSum == 0.0)
        fftScaleFactor_ = 0.0; // Catch invalid cases and mute output
    else
    {
        // The numerator equals the hop fraction (hop/window), attenuating
        // by 6 dB for each halving of the hop size.
        switch(hopSelectedSize_)
        {
            case kHopSize1Window:   // 0dB
                fftScaleFactor_ = 1.0/(double)windowSum;
                break;
            case kHopSize1_2Window: // -6dB
                fftScaleFactor_ = 0.5/(double)windowSum;
                break;
            case kHopSize1_4Window: // -12dB
                fftScaleFactor_ = 0.25/(double)windowSum;
                break;
            case kHopSize1_8Window: // -18dB
                fftScaleFactor_ = 0.125/(double)windowSum;
                break;
        }
    }
}
|
y@0
|
828
|
y@0
|
829 //==============================================================================
|
y@0
|
830 // This creates new instances of the plugin..
|
y@0
|
// This creates new instances of the plugin.. Called by the JUCE plug-in
// wrapper; the caller takes ownership of the returned processor.
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new ADRessAudioProcessor();
}
|