comparison examples/basic_FFT_phase_vocoder/render.cpp @ 375:768acdeea362 prerelease

Merge
author Giulio Moro <giuliomoro@yahoo.it>
date Fri, 10 Jun 2016 00:35:18 +0100
parents 3bed6b09223c
children 24c3a0663d54
comparison
equal deleted inserted replaced
374:ecad1ea0382a 375:768acdeea362
1 /*
2 ____ _____ _ _
3 | __ )| ____| | / \
4 | _ \| _| | | / _ \
5 | |_) | |___| |___ / ___ \
6 |____/|_____|_____/_/ \_\.io
7
8 */
9
1 /* 10 /*
2 * render.cpp 11 * render.cpp
3 * 12 *
4 * Created on: Oct 24, 2014 13 * Created on: Oct 24, 2014
5 * Author: parallels 14 * Author: parallels
6 */ 15 */
7 16
17 /**
18 \example 4_audio_FFT_phase_vocoder
19
20 Phase Vocoder
21 ----------------------
22
23 This sketch shows an implementation of a phase vocoder and builds on the previous FFT example.
24 Again it uses the NE10 library, included at the top of the file (line 31).
25
26 Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).
27 */
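
For readers new to NE10, here is a minimal sketch of the complex-to-complex FFT API in the form this changeset moves to: a config allocated with ne10_fft_alloc_c2c_float32_neon() and transforms run as ne10_fft_c2c_1d_float32_neon(dst, src, cfg, inverse). Everything else (buffer names, the 16-point size, the ramp input) is illustrative, not taken from the example.

```cpp
#include <NE10.h> // NEON FFT library, as included by render.cpp

// Minimal NE10 round trip: forward FFT followed by inverse FFT.
// All names and sizes here are illustrative; only the NE10 calls mirror the example.
static void ne10RoundTripSketch()
{
	const int fftSize = 16; // illustrative size

	ne10_fft_cpx_float32_t* timeIn  = (ne10_fft_cpx_float32_t*) NE10_MALLOC (fftSize * sizeof (ne10_fft_cpx_float32_t));
	ne10_fft_cpx_float32_t* freq    = (ne10_fft_cpx_float32_t*) NE10_MALLOC (fftSize * sizeof (ne10_fft_cpx_float32_t));
	ne10_fft_cpx_float32_t* timeOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (fftSize * sizeof (ne10_fft_cpx_float32_t));
	ne10_fft_cfg_float32_t cfg      = ne10_fft_alloc_c2c_float32_neon (fftSize);

	// Fill the input with a real-valued ramp
	for(int n = 0; n < fftSize; n++) {
		timeIn[n].r = (float)n;
		timeIn[n].i = 0;
	}

	ne10_fft_c2c_1d_float32_neon (freq, timeIn, cfg, 0);  // forward FFT
	ne10_fft_c2c_1d_float32_neon (timeOut, freq, cfg, 1); // inverse FFT

	NE10_FREE(timeIn);
	NE10_FREE(freq);
	NE10_FREE(timeOut);
	NE10_FREE(cfg);
}
```
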
8 28
9 #include <Bela.h> 29 #include <Bela.h>
10 #include <rtdk.h> 30 #include <rtdk.h>
11 #include <NE10.h> // NEON FFT library 31 #include <NE10.h> // NEON FFT library
12 #include <cmath> 32 #include <cmath>
58 }; 78 };
59 79
60 float gDryWet = 1; // mix between the unprocessed and processed sound 80 float gDryWet = 1; // mix between the unprocessed and processed sound
61 float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input 81 float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input
62 float gGain = 1; // overall gain 82 float gGain = 1; // overall gain
83 float *gInputAudio = NULL;
63 Midi midi; 84 Midi midi;
85
86
64 void midiCallback(MidiChannelMessage message, void* arg){ 87 void midiCallback(MidiChannelMessage message, void* arg){
65 if(message.getType() == kmmNoteOn){ 88 if(message.getType() == kmmNoteOn){
66 if(message.getDataByte(1) > 0){ 89 if(message.getDataByte(1) > 0){
67 int note = message.getDataByte(0); 90 int note = message.getDataByte(0);
68 float frequency = powf(2, (note-69)/12.f)*440; 91 float frequency = powf(2, (note-69)/12.f)*440;
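
The conversion above is standard equal temperament, frequency = 440 * 2^((note - 69) / 12): MIDI note 69 maps to 440 Hz and every 12 semitones doubles the frequency. A small standalone check, with hypothetical helper names:

```cpp
#include <cmath>
#include <cstdio>

// Equal-tempered MIDI note to frequency, same formula as the callback above.
static float midiNoteToFrequency(int note)
{
	return powf(2, (note - 69) / 12.f) * 440;
}

int main()
{
	printf("%.1f\n", midiNoteToFrequency(69)); // 440.0 (A4)
	printf("%.1f\n", midiNoteToFrequency(57)); // 220.0 (A3, one octave down)
	printf("%.1f\n", midiNoteToFrequency(60)); // ~261.6 (middle C)
	return 0;
}
```
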
111 gOutputBufferWritePointer += gHopSize; 134 gOutputBufferWritePointer += gHopSize;
112 135
113 timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); 136 timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
114 timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); 137 timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
115 frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); 138 frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
116 cfg = ne10_fft_alloc_c2c_float32 (gFFTSize); 139 cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);
117 140
118 memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t)); 141 memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));
119 memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float)); 142 memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float));
143
144 // Allocate buffer to mirror and modify the input
145 gInputAudio = (float *)malloc(context->audioFrames * context->audioChannels * sizeof(float));
146 if(gInputAudio == 0)
147 return false;
120 148
121 // Allocate the window buffer based on the FFT size 149 // Allocate the window buffer based on the FFT size
122 gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float)); 150 gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float));
123 if(gWindowBuffer == 0) 151 if(gWindowBuffer == 0)
124 return false; 152 return false;
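
The window contents themselves are filled in later in setup(), outside the lines shown here. A common choice for an overlap-add phase vocoder is a Hann window; the sketch below assumes that choice and uses hypothetical names, so it illustrates the allocate-check-fill pattern rather than transcribing the example.

```cpp
#include <cmath>
#include <cstdlib>

// Hypothetical sketch: allocate a window of fftSize samples and fill it with a
// Hann window. The real example allocates gWindowBuffer the same way, but the
// window it actually fills in is not visible in this hunk.
static float* makeHannWindow(int fftSize)
{
	float* window = (float *)malloc(fftSize * sizeof(float));
	if(window == 0) // mirror the example's allocation-failure check
		return 0;
	for(int n = 0; n < fftSize; n++)
		window[n] = 0.5f * (1.0f - cosf(2.0f * (float)M_PI * n / (float)(fftSize - 1)));
	return window;
}
```
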
155 if(pointer >= BUFFER_SIZE) 183 if(pointer >= BUFFER_SIZE)
156 pointer = 0; 184 pointer = 0;
157 } 185 }
158 186
159 // Run the FFT 187 // Run the FFT
160 ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg->twiddles, cfg->factors, gFFTSize, 0); 188 ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);
161 189
162 switch (gEffect){ 190 switch (gEffect){
163 case kRobot : 191 case kRobot :
164 // Robotise the output 192 // Robotise the output
165 for(int n = 0; n < gFFTSize; n++) { 193 for(int n = 0; n < gFFTSize; n++) {
180 //bypass 208 //bypass
181 break; 209 break;
182 } 210 }
183 211
184 // Run the inverse FFT 212 // Run the inverse FFT
185 ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg->twiddles, cfg->factors, gFFTSize, 1); 213 ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);
186 // Overlap-and-add timeDomainOut into the output buffer 214 // Overlap-and-add timeDomainOut into the output buffer
187 pointer = outWritePointer; 215 pointer = outWritePointer;
188 for(int n = 0; n < gFFTSize; n++) { 216 for(int n = 0; n < gFFTSize; n++) {
189 outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor; 217 outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor;
190 if(isnan(outBuffer[pointer])) 218 if(isnan(outBuffer[pointer]))
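
Note the updated NE10 call in this changeset: the whole cfg is passed instead of cfg->twiddles, cfg->factors and the size. After the inverse FFT, the real part of each frame is overlap-added into a circular output buffer, as the lines above show. A standalone sketch of that overlap-add step, with placeholder names and scale factor:

```cpp
#include <NE10.h>

// Hypothetical overlap-add step: accumulate the real part of one inverse-FFT
// frame into a circular output buffer, wrapping the write index at the end.
// The buffer size, scale factor and names are placeholders, not the example's.
static void overlapAdd(float* outBuffer, int bufferSize, int writePointer,
                       const ne10_fft_cpx_float32_t* frame, int fftSize,
                       float scaleFactor)
{
	int pointer = writePointer;
	for(int n = 0; n < fftSize; n++) {
		outBuffer[pointer] += frame[n].r * scaleFactor; // accumulate, do not overwrite
		pointer++;
		if(pointer >= bufferSize) // wrap around the circular buffer
			pointer = 0;
	}
}
```
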
204 // Input and output are given from the audio hardware and the other 232 // Input and output are given from the audio hardware and the other
205 // ADCs and DACs (if available). If only audio is available, numMatrixFrames 233 // ADCs and DACs (if available). If only audio is available, numMatrixFrames
206 // will be 0. 234 // will be 0.
207 void render(BelaContext* context, void* userData) 235 void render(BelaContext* context, void* userData)
208 { 236 {
209 float* audioIn = context->audioIn;
210 float* audioOut = context->audioOut; 237 float* audioOut = context->audioOut;
211 int numAudioFrames = context->audioFrames; 238 int numAudioFrames = context->audioFrames;
212 int numAudioChannels = context->audioChannels; 239 int numAudioChannels = context->audioChannels;
213 // ------ this code internal to the demo; leave as is ---------------- 240 // ------ this code internal to the demo; leave as is ----------------
214 241
215 // Prep the "input" to be the sound file played in a loop 242 // Prep the "input" to be the sound file played in a loop
216 for(int n = 0; n < numAudioFrames; n++) { 243 for(int n = 0; n < numAudioFrames; n++) {
217 if(gReadPtr < gSampleData.sampleLen) 244 if(gReadPtr < gSampleData.sampleLen)
218 audioIn[2*n] = audioIn[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) + 245 gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
219 gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1)); 246 gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1));
220 else 247 else
221 audioIn[2*n] = audioIn[2*n+1] = 0; 248 gInputAudio[2*n] = gInputAudio[2*n+1] = 0;
222 if(++gReadPtr >= gSampleData.sampleLen) 249 if(++gReadPtr >= gSampleData.sampleLen)
223 gReadPtr = 0; 250 gReadPtr = 0;
224 } 251 }
225 // ------------------------------------------------------------------- 252 // -------------------------------------------------------------------
226 253
227 for(int n = 0; n < numAudioFrames; n++) { 254 for(int n = 0; n < numAudioFrames; n++) {
228 gInputBuffer[gInputBufferPointer] = ((audioIn[n*numAudioChannels] + audioIn[n*numAudioChannels+1]) * 0.5); 255 gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5);
229 256
230 // Copy output buffer to output 257 // Copy output buffer to output
231 for(int channel = 0; channel < numAudioChannels; channel++){ 258 for(int channel = 0; channel < numAudioChannels; channel++){
232 audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * audioIn[n * numAudioChannels + channel]; 259 audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel];
233 } 260 }
234 261
235 // Clear the output sample in the buffer so it is ready for the next overlap-add 262 // Clear the output sample in the buffer so it is ready for the next overlap-add
236 gOutputBuffer[gOutputBufferReadPointer] = 0; 263 gOutputBuffer[gOutputBufferReadPointer] = 0;
237 gOutputBufferReadPointer++; 264 gOutputBufferReadPointer++;
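
The per-sample mix above is a linear dry/wet crossfade with an output gain applied to the processed path: gDryWet = 1 gives only the processed signal, gDryWet = 0 only the (file or live) input. As a standalone illustration with hypothetical names:

```cpp
// Linear dry/wet crossfade with output gain, as used per sample in render().
static inline float mixDryWet(float processed, float dry, float dryWet, float gain)
{
	return processed * gain * dryWet + (1 - dryWet) * dry;
}
// e.g. mixDryWet(p, d, 1.0f, 1.0f) == p (fully wet),
//      mixDryWet(p, d, 0.0f, 1.0f) == d (fully dry).
```
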
265 { 292 {
266 NE10_FREE(timeDomainIn); 293 NE10_FREE(timeDomainIn);
267 NE10_FREE(timeDomainOut); 294 NE10_FREE(timeDomainOut);
268 NE10_FREE(frequencyDomain); 295 NE10_FREE(frequencyDomain);
269 NE10_FREE(cfg); 296 NE10_FREE(cfg);
297 free(gInputAudio);
270 free(gWindowBuffer); 298 free(gWindowBuffer);
271 } 299 }