annotate examples/04-Audio/FFT-phase-vocoder/render.cpp @ 544:cdabbaf3a252 prerelease

Updated Audio examples for audioOutChannels etc.
author Robert Jack <robert.h.jack@gmail.com>
date Fri, 24 Jun 2016 13:32:07 +0100
parents 1cec96845a23
children
/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\

The platform for ultra-low latency audio and sensor processing

http://bela.io

A project of the Augmented Instruments Laboratory within the
Centre for Digital Music at Queen Mary University of London.
http://www.eecs.qmul.ac.uk/~andrewm

(c) 2016 Augmented Instruments Laboratory: Andrew McPherson,
Astrid Bin, Liam Donovan, Christian Heinrichs, Robert Jack,
Giulio Moro, Laurel Pardue, Victor Zappi. All rights reserved.

The Bela software is distributed under the GNU Lesser General Public License
(LGPL 3.0), available here: https://www.gnu.org/licenses/lgpl-3.0.txt
*/


#include <Bela.h>
#include <rtdk.h>
#include <ne10/NE10.h> // NEON FFT library
#include <cmath>
#include "SampleData.h"
#include <Midi.h>

#define BUFFER_SIZE 16384

// Circular buffers and counters for the FFT input and the overlap-add output
float gInputBuffer[BUFFER_SIZE];
int gInputBufferPointer = 0;
float gOutputBuffer[BUFFER_SIZE];
int gOutputBufferWritePointer = 0;
int gOutputBufferReadPointer = 0;
int gSampleCount = 0; // counts samples since the last FFT was scheduled

float *gWindowBuffer; // analysis window (Hann), allocated in setup()

// -----------------------------------------------
// These variables are used internally by the example:
int gFFTSize = 2048;
int gHopSize = 512; // interval in samples between successive FFTs; updated from gPeriod every block
int gPeriod = 512; // target hop, set from incoming MIDI notes
float gFFTScaleFactor = 0; // set in setup() to 1/gFFTSize, used to normalise the resynthesised frames

// FFT vars
ne10_fft_cpx_float32_t* timeDomainIn;
ne10_fft_cpx_float32_t* timeDomainOut;
ne10_fft_cpx_float32_t* frequencyDomain;
ne10_fft_cfg_float32_t cfg;

// Sample info
SampleData gSampleData; // User defined structure to get complex data from main
int gReadPtr = 0; // Position of last read sample from file

// Auxiliary task for calculating FFT
AuxiliaryTask gFFTTask;
int gFFTInputBufferPointer = 0;
int gFFTOutputBufferPointer = 0;

void process_fft_background();


int gEffect = 0; // change this here or with midi CC
enum{
    kBypass,
    kRobot,
    kWhisper,
};

float gDryWet = 1; // mix between the unprocessed and processed sound
float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input
float gGain = 1; // overall gain
float *gInputAudio = NULL;
Midi midi;


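// MIDI callback, run whenever a new MIDI message arrives.
// A note-on sets gPeriod from the note's pitch (the conversion below hard-codes
// a 44.1kHz sample rate); control changes 2-5 set the effect type, the
// playback/live mix, the dry/wet mix and the overall gain. Any other CC is
// pretty-printed to the console.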
void midiCallback(MidiChannelMessage message, void* arg){
    if(message.getType() == kmmNoteOn){
        if(message.getDataByte(1) > 0){
            int note = message.getDataByte(0);
            float frequency = powf(2, (note-69)/12.f)*440;
            gPeriod = (int)(44100 / frequency + 0.5);
            printf("\nnote: %d, frequency: %f, hop: %d\n", note, frequency, gPeriod);
        }
    }

    bool shouldPrint = false;
    if(message.getType() == kmmControlChange){
        float data = message.getDataByte(1) / 127.0f;
        switch (message.getDataByte(0)){
        case 2 :
            gEffect = (int)(data * 2 + 0.5); // CC 2 selects one of the three effects (0, 1 or 2)
            break;
        case 3 :
            gPlaybackLive = data;
            break;
        case 4 :
            gDryWet = data;
            break;
        case 5:
            gGain = data*10;
            break;
        default:
            shouldPrint = true;
        }
    }
    if(shouldPrint){
        message.prettyPrint();
    }
}

// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.
bool setup(BelaContext* context, void* userData)
{
    // Check that we have the same number of inputs and outputs.
    if(context->audioInChannels != context->audioOutChannels ||
            context->analogInChannels != context->analogOutChannels){
        printf("Error: for this project, you need the same number of input and output channels.\n");
        return false;
    }

    midi.readFrom(0);
    midi.setParserCallback(midiCallback);
    // Retrieve a parameter passed in from the initAudio() call
    gSampleData = *(SampleData *)userData;

    gFFTScaleFactor = 1.0f / (float)gFFTSize;
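    // Start the output write pointer one hop ahead of the read pointer: each FFT
    // frame is overlap-added starting from the write position, so the background
    // task has roughly one hop's worth of samples to finish before the read
    // pointer reaches its output.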
    gOutputBufferWritePointer += gHopSize;

    timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
    timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
    frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
    cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);

    memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));
    memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float));

    // Allocate buffer to mirror and modify the input
    gInputAudio = (float *)malloc(context->audioFrames * context->audioOutChannels * sizeof(float));
    if(gInputAudio == 0)
        return false;

    // Allocate the window buffer based on the FFT size
    gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float));
    if(gWindowBuffer == 0)
        return false;

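    // With the default 2048-point FFT and 512-sample hop, successive Hann-windowed
    // frames overlap by 75%, so their overlap-add sums to a roughly constant gain.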
    // Calculate a Hann window
    for(int n = 0; n < gFFTSize; n++) {
        gWindowBuffer[n] = 0.5f * (1.0f - cosf(2.0 * M_PI * n / (float)(gFFTSize - 1)));
    }

    // Initialise auxiliary tasks
    if((gFFTTask = Bela_createAuxiliaryTask(&process_fft_background, 90, "fft-calculation")) == 0)
        return false;
    rt_printf("You are listening to an FFT phase-vocoder with overlap-and-add.\n"
              "Use MIDI Control Change to control:\n"
              "CC 2: effect type (bypass/robotization/whisperization)\n"
              "CC 3: mix between recorded sample and live audio input\n"
              "CC 4: mix between the unprocessed and processed sound\n"
              "CC 5: gain\n"
              );
    return true;
}

// This function handles the FFT processing in this example once the buffer has
// been assembled: it windows the most recent gFFTSize input samples, takes the
// FFT, modifies the spectrum according to the selected effect, and overlap-adds
// the inverse FFT back into the output buffer.
void process_fft(float *inBuffer, int inWritePointer, float *outBuffer, int outWritePointer)
{
    // Copy buffer into FFT input
    int pointer = (inWritePointer - gFFTSize + BUFFER_SIZE) % BUFFER_SIZE;
    for(int n = 0; n < gFFTSize; n++) {
        timeDomainIn[n].r = (ne10_float32_t) inBuffer[pointer] * gWindowBuffer[n];
        timeDomainIn[n].i = 0;

        pointer++;
        if(pointer >= BUFFER_SIZE)
            pointer = 0;
    }

    // Run the FFT
    ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);

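    // Spectral processing: both effects keep each bin's magnitude and replace its
    // phase. Robotisation zeroes the phase of every frame; whisperisation
    // randomises it; bypass leaves the spectrum untouched.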
    switch (gEffect){
    case kRobot :
        // Robotise the output
        for(int n = 0; n < gFFTSize; n++) {
            float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
            frequencyDomain[n].r = amplitude;
            frequencyDomain[n].i = 0;
        }
        break;
    case kWhisper :
        for(int n = 0; n < gFFTSize; n++) {
            float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
            float phase = rand()/(float)RAND_MAX * 2 * M_PI;
            frequencyDomain[n].r = cosf(phase) * amplitude;
            frequencyDomain[n].i = sinf(phase) * amplitude;
        }
        break;
    case kBypass:
        //bypass
        break;
    }

    // Run the inverse FFT
    ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);
    // Overlap-and-add timeDomainOut into the output buffer
    pointer = outWritePointer;
    for(int n = 0; n < gFFTSize; n++) {
        outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor;
        if(isnan(outBuffer[pointer]))
            rt_printf("outBuffer OLA\n");
        pointer++;
        if(pointer >= BUFFER_SIZE)
            pointer = 0;
    }
}

// Function to process the FFT in a thread at lower priority
void process_fft_background() {
    process_fft(gInputBuffer, gFFTInputBufferPointer, gOutputBuffer, gFFTOutputBufferPointer);
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, the analog
// frame count (context->analogFrames) will be 0.
void render(BelaContext* context, void* userData)
{
    float* audioOut = context->audioOut;
    int numAudioFrames = context->audioFrames;
    int numAudioChannels = context->audioOutChannels;
    // ------ this code internal to the demo; leave as is ----------------

    // Prep the "input" to be the sound file played in a loop, mixed with the
    // live audio input according to gPlaybackLive (written identically to both
    // channels of the stereo gInputAudio buffer)
    for(int n = 0; n < numAudioFrames; n++) {
        if(gReadPtr < gSampleData.sampleLen)
            gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
                gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1));
        else
            gInputAudio[2*n] = gInputAudio[2*n+1] = 0;
        if(++gReadPtr >= gSampleData.sampleLen)
            gReadPtr = 0;
    }
    // -------------------------------------------------------------------

    for(int n = 0; n < numAudioFrames; n++) {
        gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5);

        // Copy output buffer to output
        for(int channel = 0; channel < numAudioChannels; channel++){
            audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel];
        }

        // Clear the output sample in the buffer so it is ready for the next overlap-add
        gOutputBuffer[gOutputBufferReadPointer] = 0;
        gOutputBufferReadPointer++;
        if(gOutputBufferReadPointer >= BUFFER_SIZE)
            gOutputBufferReadPointer = 0;
        gOutputBufferWritePointer++;
        if(gOutputBufferWritePointer >= BUFFER_SIZE)
            gOutputBufferWritePointer = 0;

        gInputBufferPointer++;
        if(gInputBufferPointer >= BUFFER_SIZE)
            gInputBufferPointer = 0;

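        // Once a full hop of new input has been collected, snapshot the current
        // buffer positions and schedule the FFT on the lower-priority auxiliary
        // task, so the heavy processing never blocks render().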
        gSampleCount++;
        if(gSampleCount >= gHopSize) {
            //process_fft(gInputBuffer, gInputBufferPointer, gOutputBuffer, gOutputBufferPointer);
            gFFTInputBufferPointer = gInputBufferPointer;
            gFFTOutputBufferPointer = gOutputBufferWritePointer;
            Bela_scheduleAuxiliaryTask(gFFTTask);

            gSampleCount = 0;
        }
    }
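    // Let the hop size follow the period set by the most recent MIDI note;
    // the hop rate sets how often frames are resynthesised and hence the
    // perceived pitch of the robotisation effect.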
    gHopSize = gPeriod;
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BelaContext* context, void* userData)
{
    NE10_FREE(timeDomainIn);
    NE10_FREE(timeDomainOut);
    NE10_FREE(frequencyDomain);
    NE10_FREE(cfg);
    free(gInputAudio);
    free(gWindowBuffer);
}


/**
\example FFT-phase-vocoder/render.cpp

Phase Vocoder
----------------------

This sketch shows an implementation of a phase vocoder and builds on the previous FFT example.
Again it uses the NE10 library, included at the top of the file.

Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).
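
The example is controlled over MIDI: note-ons set the resynthesis period (and with it
the pitch of the robotisation effect), CC 2 selects the effect
(bypass/robotization/whisperization), CC 3 sets the mix between the recorded sample
and the live input, CC 4 the dry/wet mix and CC 5 the overall gain.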
*/