/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\.io

*/

/*
 * render.cpp
 *
 *  Created on: Oct 24, 2014
 *      Author: Andrew McPherson, C4DM, QMUL
 */

/**
\example 4_audio_FFT

Fast Fourier Transform
----------------------

This sketch performs an FFT (Fast Fourier Transform) on incoming audio. It uses
the NE10 library, included at the top of the file.

Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).

The variables `timeDomainIn`, `timeDomainOut` and `frequencyDomain` are pointers
to the struct `ne10_fft_cpx_float32_t` [http://projectne10.github.io/Ne10/doc/structne10__fft__cpx__float32__t.html](http://projectne10.github.io/Ne10/doc/structne10__fft__cpx__float32__t.html).
These are declared at the top of the file, and memory is allocated for them in
`setup()`.

In `render()`, a `for` loop collects each incoming audio sample into the FFT
input buffer. Once the buffer holds `gFFTSize` samples, the FFT and inverse FFT
are performed, and the resulting time-domain output is written, one sample per
frame, to every audio output channel.
*/

#include <Bela.h>
#include <cstring>		// for memset()
#include <NE10.h>		// neon library
#include <cmath>

int gFFTSize;

int gReadPointer = 0;
int gWritePointer = 0;

// FFT vars
static ne10_fft_cpx_float32_t* timeDomainIn;
static ne10_fft_cpx_float32_t* timeDomainOut;
static ne10_fft_cpx_float32_t* frequencyDomain;
static ne10_fft_cfg_float32_t cfg;

// setup() is called once before the audio rendering starts.
// Use it to perform any initialisation and allocation which is dependent
// on the period size or sample rate.
//
// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.

bool setup(BelaContext *context, void *userData)
{
	// Retrieve a parameter passed in from the initAudio() call
	gFFTSize = *(int *)userData;

	timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);

	memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));

	return true;
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, the analog
// frame count will be 0.
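//
// In this example, incoming samples are collected one at a time into
// timeDomainIn. Each time gFFTSize samples have been gathered, the whole
// block is transformed to the frequency domain and straight back again, and
// the result in timeDomainOut is then played out one sample per frame while
// the next input block is being collected.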
void render(BelaContext *context, void *userData)
{
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		// Mix the stereo input down to mono and store it as the real part of
		// the next FFT input sample; the imaginary part is zero
		timeDomainIn[gReadPointer].r = (ne10_float32_t) ((context->audioIn[n*context->audioChannels] +
						context->audioIn[n*context->audioChannels+1]) * 0.5);
		timeDomainIn[gReadPointer].i = 0;

		if(++gReadPointer >= gFFTSize)
		{
			// FFT
			ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);

			// Do frequency domain stuff

			// IFFT
			ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);

			gReadPointer = 0;
			gWritePointer = 0;
		}

		// Copy the latest inverse-FFT output sample to every audio output channel
		for(unsigned int channel = 0; channel < context->audioChannels; channel++)
			context->audioOut[n * context->audioChannels + channel] = (float) timeDomainOut[gWritePointer].r;
		gWritePointer++;
	}
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BelaContext *context, void *userData)
{
	NE10_FREE(timeDomainIn);
	NE10_FREE(timeDomainOut);
	NE10_FREE(frequencyDomain);
	NE10_FREE(cfg);
}
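
/*
  The "Do frequency domain stuff" step in render() is left empty in this
  example. Below is a minimal, hypothetical sketch of one thing that could go
  there: a crude brick-wall low-pass filter that zeroes every FFT bin above a
  chosen cutoff, together with its mirrored negative-frequency counterpart,
  before the inverse transform. The helper name lowPassSpectrum() and the
  cutoff value are illustrative only and not part of the original example.

	static void lowPassSpectrum(ne10_fft_cpx_float32_t* spectrum, int fftSize, int cutoffBin)
	{
		// Bins k and fftSize - k are complex conjugates for a real input
		// signal, so both halves are zeroed to keep the IFFT output real
		for(int k = cutoffBin; k <= fftSize - cutoffBin; k++) {
			spectrum[k].r = 0;
			spectrum[k].i = 0;
		}
	}

  It would be called in render() between the forward and inverse FFTs, e.g.:

	lowPassSpectrum(frequencyDomain, gFFTSize, gFFTSize / 8);
*/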