examples/04-Audio/FFT-phase-vocoder/render.cpp @ 468:85cf9c0da052 prerelease

merge
author Giulio Moro <giuliomoro@yahoo.it>
date Mon, 20 Jun 2016 17:08:02 +0100
parents 8fcfbfb32aa0
children b935f890e512
/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\

The platform for ultra-low latency audio and sensor processing

http://bela.io

A project of the Augmented Instruments Laboratory within the
Centre for Digital Music at Queen Mary University of London.
http://www.eecs.qmul.ac.uk/~andrewm

(c) 2016 Augmented Instruments Laboratory: Andrew McPherson,
Astrid Bin, Liam Donovan, Christian Heinrichs, Robert Jack,
Giulio Moro, Laurel Pardue, Victor Zappi. All rights reserved.

The Bela software is distributed under the GNU Lesser General Public License
(LGPL 3.0), available here: https://www.gnu.org/licenses/lgpl-3.0.txt
*/

#include <Bela.h>
#include <rtdk.h>
#include <ne10/NE10.h> // NEON FFT library
#include <cmath>
#include "SampleData.h"
#include <Midi.h>

#define BUFFER_SIZE 16384

// Circular buffers and counters used to assemble FFT input frames and to overlap-add output frames
float gInputBuffer[BUFFER_SIZE];
int gInputBufferPointer = 0;
float gOutputBuffer[BUFFER_SIZE];
int gOutputBufferWritePointer = 0;
int gOutputBufferReadPointer = 0;
int gSampleCount = 0;

float *gWindowBuffer;

// -----------------------------------------------
// These variables are used internally in the example:
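// gFFTSize is the analysis/synthesis frame length; gHopSize is the spacing
// (in samples) between successive frames. gPeriod holds the hop size most
// recently requested via MIDI note-on and is copied into gHopSize at the end
// of each render() block. gFFTScaleFactor (set to 1/gFFTSize in setup())
// scales each inverse-transformed frame before it is overlap-added.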
int gFFTSize = 2048;
int gHopSize = 512;
int gPeriod = 512;
float gFFTScaleFactor = 0;

// FFT vars
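// NE10 working buffers: timeDomainIn holds the windowed input frame (real part
// only), frequencyDomain holds its spectrum, timeDomainOut receives the inverse
// transform, and cfg is the NE10 configuration ("plan") allocated in setup()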
ne10_fft_cpx_float32_t* timeDomainIn;
ne10_fft_cpx_float32_t* timeDomainOut;
ne10_fft_cpx_float32_t* frequencyDomain;
ne10_fft_cfg_float32_t cfg;

// Sample info
SampleData gSampleData; // User-defined structure used to pass the sample data in from main()
int gReadPtr = 0; // Position of last read sample from file

// Auxiliary task for calculating FFT
AuxiliaryTask gFFTTask;
int gFFTInputBufferPointer = 0;
int gFFTOutputBufferPointer = 0;

void process_fft_background();


int gEffect = 0; // change this here or with midi CC
enum{
	kBypass,
	kRobot,
	kWhisper,
};

float gDryWet = 1; // mix between the unprocessed and processed sound
float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input
float gGain = 1; // overall gain
float *gInputAudio = NULL;
Midi midi;


void midiCallback(MidiChannelMessage message, void* arg){
	if(message.getType() == kmmNoteOn){
		if(message.getDataByte(1) > 0){
			int note = message.getDataByte(0);
			float frequency = powf(2, (note-69)/12.f)*440;
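			// Convert the note's frequency to a period in samples and use it as the
			// new synthesis hop size. The 44100 here assumes a 44.1 kHz sample rate;
			// context->audioSampleRate would avoid the hard-coded value.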
			gPeriod = (int)(44100 / frequency + 0.5);
			printf("\nnote: %d, frequency: %f, hop: %d\n", note, frequency, gPeriod);
		}
	}

	bool shouldPrint = false;
	if(message.getType() == kmmControlChange){
		float data = message.getDataByte(1) / 127.0f;
		switch (message.getDataByte(0)){
			case 2 :
				gEffect = (int)(data * 2 + 0.5); // CC2 selects one of the effects 0, 1, 2
				break;
			case 3 :
				gPlaybackLive = data;
				break;
			case 4 :
				gDryWet = data;
				break;
			case 5:
				gGain = data*10;
				break;
			default:
				shouldPrint = true;
		}
	}
	if(shouldPrint){
		message.prettyPrint();
	}
}

// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.
bool setup(BelaContext* context, void* userData)
{
	midi.readFrom(0);
	midi.setParserCallback(midiCallback);
	// Retrieve a parameter passed in from the initAudio() call
	gSampleData = *(SampleData *)userData;

	gFFTScaleFactor = 1.0f / (float)gFFTSize;
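	// Start the output write pointer one hop ahead of the read pointer: the FFT
	// task overlap-adds its result from the write position, so this gives it up
	// to one hop of time before those samples are read out (and sets the extra
	// latency introduced by the effect)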
	gOutputBufferWritePointer += gHopSize;

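	// Allocate the NE10 buffers and the FFT configuration ("plan") for a
	// complex-to-complex transform of gFFTSize points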
	timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);

	memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float));

	// Allocate buffer to mirror and modify the input
	gInputAudio = (float *)malloc(context->audioFrames * context->audioChannels * sizeof(float));
	if(gInputAudio == 0)
		return false;

	// Allocate the window buffer based on the FFT size
	gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float));
	if(gWindowBuffer == 0)
		return false;

	// Calculate a Hann window
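	// The window tapers each analysis frame to zero at its edges, so that
	// successive overlapping frames add back together smoothly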
	for(int n = 0; n < gFFTSize; n++) {
		gWindowBuffer[n] = 0.5f * (1.0f - cosf(2.0 * M_PI * n / (float)(gFFTSize - 1)));
	}

	// Initialise auxiliary tasks
	if((gFFTTask = Bela_createAuxiliaryTask(&process_fft_background, 90, "fft-calculation")) == 0)
		return false;
	rt_printf("You are listening to an FFT phase-vocoder with overlap-and-add.\n"
	"Use MIDI Control Change to control:\n"
	"CC 2: effect type (bypass/robotization/whisperization)\n"
	"CC 3: mix between recorded sample and live audio input\n"
	"CC 4: mix between the unprocessed and processed sound\n"
	"CC 5: gain\n"
	);
	return true;
}

// This function handles the FFT processing in this example once the buffer has
// been assembled.
void process_fft(float *inBuffer, int inWritePointer, float *outBuffer, int outWritePointer)
{
	// Copy buffer into FFT input
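	// Take the most recent gFFTSize samples from the circular input buffer,
	// apply the Hann window, and pack them into the real part of the complex
	// FFT input (the imaginary part stays zero)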
	int pointer = (inWritePointer - gFFTSize + BUFFER_SIZE) % BUFFER_SIZE;
	for(int n = 0; n < gFFTSize; n++) {
		timeDomainIn[n].r = (ne10_float32_t) inBuffer[pointer] * gWindowBuffer[n];
		timeDomainIn[n].i = 0;

		pointer++;
		if(pointer >= BUFFER_SIZE)
			pointer = 0;
	}

	// Run the FFT
	ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);

	switch (gEffect){
		case kRobot :
			// Robotise the output
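			// Setting the phase of every bin to zero while keeping its magnitude
			// makes every frame identical in phase, which imposes a constant pitch
			// at the frame (hop) rate: the classic "robot" effect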
			for(int n = 0; n < gFFTSize; n++) {
				float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
				frequencyDomain[n].r = amplitude;
				frequencyDomain[n].i = 0;
			}
			break;
		case kWhisper :
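			// Whisperisation keeps each bin's magnitude but randomises its phase,
			// destroying the harmonic structure and leaving a breathy, pitchless
			// version of the spectrum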
			for(int n = 0; n < gFFTSize; n++) {
				float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
				float phase = rand()/(float)RAND_MAX * 2 * M_PI;
				frequencyDomain[n].r = cosf(phase) * amplitude;
				frequencyDomain[n].i = sinf(phase) * amplitude;
			}
			break;
		case kBypass:
			//bypass
			break;
	}

	// Run the inverse FFT
	ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);
	// Overlap-and-add timeDomainOut into the output buffer
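	// Each inverse-transformed frame is scaled by gFFTScaleFactor and accumulated
	// into the circular output buffer, starting at the write position captured
	// when this FFT was scheduled; successive frames overlap by
	// gFFTSize - gHopSize samples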
	pointer = outWritePointer;
	for(int n = 0; n < gFFTSize; n++) {
		outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor;
		if(isnan(outBuffer[pointer]))
			rt_printf("outBuffer OLA\n");
		pointer++;
		if(pointer >= BUFFER_SIZE)
			pointer = 0;
	}
}

// Function to process the FFT in a thread at lower priority
void process_fft_background() {
	process_fft(gInputBuffer, gFFTInputBufferPointer, gOutputBuffer, gFFTOutputBufferPointer);
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, the analog
// frame and channel counts in the context will be 0.
void render(BelaContext* context, void* userData)
{
	float* audioOut = context->audioOut;
	int numAudioFrames = context->audioFrames;
	int numAudioChannels = context->audioChannels;
	// ------ this code internal to the demo; leave as is ----------------

	// Prep the "input" to be the sound file played in a loop
	for(int n = 0; n < numAudioFrames; n++) {
		if(gReadPtr < gSampleData.sampleLen)
			gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
				gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1));
		else
			gInputAudio[2*n] = gInputAudio[2*n+1] = 0;
		if(++gReadPtr >= gSampleData.sampleLen)
			gReadPtr = 0;
	}
	// -------------------------------------------------------------------

	for(int n = 0; n < numAudioFrames; n++) {
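		// Mix the two channels of the prepared input down to mono and store the
		// result in the circular input buffer that feeds the FFT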
		gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5);

		// Copy output buffer to output
		for(int channel = 0; channel < numAudioChannels; channel++){
			audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel];
		}

		// Clear the output sample in the buffer so it is ready for the next overlap-add
		gOutputBuffer[gOutputBufferReadPointer] = 0;
		gOutputBufferReadPointer++;
		if(gOutputBufferReadPointer >= BUFFER_SIZE)
			gOutputBufferReadPointer = 0;
		gOutputBufferWritePointer++;
		if(gOutputBufferWritePointer >= BUFFER_SIZE)
			gOutputBufferWritePointer = 0;

		gInputBufferPointer++;
		if(gInputBufferPointer >= BUFFER_SIZE)
			gInputBufferPointer = 0;

		gSampleCount++;
		if(gSampleCount >= gHopSize) {
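			// A hop's worth of new samples has been collected: snapshot the current
			// buffer positions for the FFT and hand the work to the lower-priority
			// auxiliary task, so that render() itself is never blocked by the FFT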
			//process_fft(gInputBuffer, gInputBufferPointer, gOutputBuffer, gOutputBufferPointer);
			gFFTInputBufferPointer = gInputBufferPointer;
			gFFTOutputBufferPointer = gOutputBufferWritePointer;
			Bela_scheduleAuxiliaryTask(gFFTTask);

			gSampleCount = 0;
		}
	}
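	// Pick up any new hop size requested via MIDI note-on at the end of the block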
	gHopSize = gPeriod;
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BelaContext* context, void* userData)
{
	NE10_FREE(timeDomainIn);
	NE10_FREE(timeDomainOut);
	NE10_FREE(frequencyDomain);
	NE10_FREE(cfg);
	free(gInputAudio);
	free(gWindowBuffer);
}

/* ------------ Project Explanation ------------ */

/**
\example 04_audio_FFT_phase_vocoder

Phase Vocoder
----------------------

This sketch shows an implementation of a phase vocoder and builds on the previous FFT example.
As in that example, it uses the NE10 library, included at the top of the file.
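
Audio (a mix of a looped sample and the live input) is collected into a circular
buffer; every gHopSize samples a frame of gFFTSize samples is windowed with a Hann
window, transformed with NE10, optionally processed (robotisation sets every bin's
phase to zero, whisperisation randomises it), inverse-transformed and overlap-added
into the output buffer by a lower-priority auxiliary task. MIDI notes set the hop
size (and hence the pitch of the robot effect), and MIDI CCs 2 to 5 select the
effect, the playback/live mix, the dry/wet mix and the gain.

The overlap-add resynthesis relies on the Hann windows, spaced at the default hop
of gFFTSize/4, summing to an approximately constant value. This can be checked with
a small standalone program (a sketch for illustration only, not part of this
example):

\code{.cpp}
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
	const int N = 2048;    // same FFT size as the example
	const int hop = N / 4; // same default hop size (512)
	std::vector<float> window(N), sum(hop, 0.0f);
	// Same Hann window formula as used in setup()
	for(int n = 0; n < N; n++)
		window[n] = 0.5f * (1.0f - cosf(2.0f * M_PI * n / (float)(N - 1)));
	// In steady state each output sample is covered by four overlapping windows
	for(int frame = 0; frame < 4; frame++)
		for(int n = 0; n < hop; n++)
			sum[n] += window[n + frame * hop];
	printf("overlap-added window sum lies in [%f, %f]\n",
			*std::min_element(sum.begin(), sum.end()),
			*std::max_element(sum.begin(), sum.end()));
	return 0;
}
\endcode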

Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).
*/