/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\

The platform for ultra-low latency audio and sensor processing

http://bela.io

A project of the Augmented Instruments Laboratory within the
Centre for Digital Music at Queen Mary University of London.
http://www.eecs.qmul.ac.uk/~andrewm

(c) 2016 Augmented Instruments Laboratory: Andrew McPherson,
Astrid Bin, Liam Donovan, Christian Heinrichs, Robert Jack,
Giulio Moro, Laurel Pardue, Victor Zappi. All rights reserved.

The Bela software is distributed under the GNU Lesser General Public License
(LGPL 3.0), available here: https://www.gnu.org/licenses/lgpl-3.0.txt
*/

// NOTE(review): the original <...> header names were stripped by an HTML-escaping
// pass; restored here from the Bela FFT-phase-vocoder example — confirm against upstream.
#include <Bela.h>
#include <rtdk.h>
#include <NE10.h> // NEON FFT library
#include <cmath>
#include "SampleData.h"
#include <Midi.h>

#define BUFFER_SIZE 16384

// TODO: your buffer and counter go here!
robert@464: float gInputBuffer[BUFFER_SIZE]; robert@464: int gInputBufferPointer = 0; robert@464: float gOutputBuffer[BUFFER_SIZE]; robert@464: int gOutputBufferWritePointer = 0; robert@464: int gOutputBufferReadPointer = 0; robert@464: int gSampleCount = 0; robert@464: robert@464: float *gWindowBuffer; robert@464: robert@464: // ----------------------------------------------- robert@464: // These variables used internally in the example: robert@464: int gFFTSize = 2048; robert@464: int gHopSize = 512; robert@464: int gPeriod = 512; robert@464: float gFFTScaleFactor = 0; robert@464: robert@464: // FFT vars robert@464: ne10_fft_cpx_float32_t* timeDomainIn; robert@464: ne10_fft_cpx_float32_t* timeDomainOut; robert@464: ne10_fft_cpx_float32_t* frequencyDomain; robert@464: ne10_fft_cfg_float32_t cfg; robert@464: robert@464: // Sample info robert@464: SampleData gSampleData; // User defined structure to get complex data from main robert@464: int gReadPtr = 0; // Position of last read sample from file robert@464: robert@464: // Auxiliary task for calculating FFT robert@464: AuxiliaryTask gFFTTask; robert@464: int gFFTInputBufferPointer = 0; robert@464: int gFFTOutputBufferPointer = 0; robert@464: robert@464: void process_fft_background(); robert@464: robert@464: robert@464: int gEffect = 0; // change this here or with midi CC robert@464: enum{ robert@464: kBypass, robert@464: kRobot, robert@464: kWhisper, robert@464: }; robert@464: robert@464: float gDryWet = 1; // mix between the unprocessed and processed sound robert@464: float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input robert@464: float gGain = 1; // overall gain robert@464: float *gInputAudio = NULL; robert@464: Midi midi; robert@464: robert@464: robert@464: void midiCallback(MidiChannelMessage message, void* arg){ robert@464: if(message.getType() == kmmNoteOn){ robert@464: if(message.getDataByte(1) > 0){ robert@464: int note = message.getDataByte(0); robert@464: float frequency = 
powf(2, (note-69)/12.f)*440; robert@464: gPeriod = (int)(44100 / frequency + 0.5); robert@464: printf("\nnote: %d, frequency: %f, hop: %d\n", note, frequency, gPeriod); robert@464: } robert@464: } robert@464: robert@464: bool shouldPrint = false; robert@464: if(message.getType() == kmmControlChange){ robert@464: float data = message.getDataByte(1) / 127.0f; robert@464: switch (message.getDataByte(0)){ robert@464: case 2 : robert@464: gEffect = (int)(data * 2 + 0.5); // CC2 selects an effect between 0,1,2 robert@464: break; robert@464: case 3 : robert@464: gPlaybackLive = data; robert@464: break; robert@464: case 4 : robert@464: gDryWet = data; robert@464: break; robert@464: case 5: robert@464: gGain = data*10; robert@464: break; robert@464: default: robert@464: shouldPrint = true; robert@464: } robert@464: } robert@464: if(shouldPrint){ robert@464: message.prettyPrint(); robert@464: } robert@464: } robert@464: robert@464: // userData holds an opaque pointer to a data structure that was passed robert@464: // in from the call to initAudio(). robert@464: // robert@464: // Return true on success; returning false halts the program. 
robert@464: bool setup(BelaContext* context, void* userData) robert@464: { robert@464: midi.readFrom(0); robert@464: midi.setParserCallback(midiCallback); robert@464: // Retrieve a parameter passed in from the initAudio() call robert@464: gSampleData = *(SampleData *)userData; robert@464: robert@464: gFFTScaleFactor = 1.0f / (float)gFFTSize; robert@464: gOutputBufferWritePointer += gHopSize; robert@464: robert@464: timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); robert@464: timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); robert@464: frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t)); robert@464: cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize); robert@464: robert@464: memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t)); robert@464: memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float)); robert@464: robert@464: // Allocate buffer to mirror and modify the input robert@464: gInputAudio = (float *)malloc(context->audioFrames * context->audioChannels * sizeof(float)); robert@464: if(gInputAudio == 0) robert@464: return false; robert@464: robert@464: // Allocate the window buffer based on the FFT size robert@464: gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float)); robert@464: if(gWindowBuffer == 0) robert@464: return false; robert@464: robert@464: // Calculate a Hann window robert@464: for(int n = 0; n < gFFTSize; n++) { robert@464: gWindowBuffer[n] = 0.5f * (1.0f - cosf(2.0 * M_PI * n / (float)(gFFTSize - 1))); robert@464: } robert@464: robert@464: // Initialise auxiliary tasks robert@464: if((gFFTTask = Bela_createAuxiliaryTask(&process_fft_background, 90, "fft-calculation")) == 0) robert@464: return false; robert@464: rt_printf("You are listening to an FFT phase-vocoder with overlap-and-add.\n" robert@464: "Use Midi Control Change to control:\n" robert@464: "CC 2: effect type 
(bypass/robotization/whisperization)\n" robert@464: "CC 3: mix between recorded sample and live audio input\n" robert@464: "CC 4: mix between the unprocessed and processed sound\n" robert@464: "CC 5: gain\n" robert@464: ); robert@464: return true; robert@464: } robert@464: robert@464: // This function handles the FFT processing in this example once the buffer has robert@464: // been assembled. robert@464: void process_fft(float *inBuffer, int inWritePointer, float *outBuffer, int outWritePointer) robert@464: { robert@464: // Copy buffer into FFT input robert@464: int pointer = (inWritePointer - gFFTSize + BUFFER_SIZE) % BUFFER_SIZE; robert@464: for(int n = 0; n < gFFTSize; n++) { robert@464: timeDomainIn[n].r = (ne10_float32_t) inBuffer[pointer] * gWindowBuffer[n]; robert@464: timeDomainIn[n].i = 0; robert@464: robert@464: pointer++; robert@464: if(pointer >= BUFFER_SIZE) robert@464: pointer = 0; robert@464: } robert@464: robert@464: // Run the FFT robert@464: ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0); robert@464: robert@464: switch (gEffect){ robert@464: case kRobot : robert@464: // Robotise the output robert@464: for(int n = 0; n < gFFTSize; n++) { robert@464: float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i); robert@464: frequencyDomain[n].r = amplitude; robert@464: frequencyDomain[n].i = 0; robert@464: } robert@464: break; robert@464: case kWhisper : robert@464: for(int n = 0; n < gFFTSize; n++) { robert@464: float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i); robert@464: float phase = rand()/(float)RAND_MAX * 2 * M_PI; robert@464: frequencyDomain[n].r = cosf(phase) * amplitude; robert@464: frequencyDomain[n].i = sinf(phase) * amplitude; robert@464: } robert@464: break; robert@464: case kBypass: robert@464: //bypass robert@464: break; robert@464: } robert@464: robert@464: // Run the inverse FFT robert@464: 
ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1); robert@464: // Overlap-and-add timeDomainOut into the output buffer robert@464: pointer = outWritePointer; robert@464: for(int n = 0; n < gFFTSize; n++) { robert@464: outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor; robert@464: if(isnan(outBuffer[pointer])) robert@464: rt_printf("outBuffer OLA\n"); robert@464: pointer++; robert@464: if(pointer >= BUFFER_SIZE) robert@464: pointer = 0; robert@464: } robert@464: } robert@464: robert@464: // Function to process the FFT in a thread at lower priority robert@464: void process_fft_background() { robert@464: process_fft(gInputBuffer, gFFTInputBufferPointer, gOutputBuffer, gFFTOutputBufferPointer); robert@464: } robert@464: robert@464: // render() is called regularly at the highest priority by the audio engine. robert@464: // Input and output are given from the audio hardware and the other robert@464: // ADCs and DACs (if available). If only audio is available, numMatrixFrames robert@464: // will be 0. 
robert@464: void render(BelaContext* context, void* userData) robert@464: { robert@464: float* audioOut = context->audioOut; robert@464: int numAudioFrames = context->audioFrames; robert@464: int numAudioChannels = context->audioChannels; robert@464: // ------ this code internal to the demo; leave as is ---------------- robert@464: robert@464: // Prep the "input" to be the sound file played in a loop robert@464: for(int n = 0; n < numAudioFrames; n++) { robert@464: if(gReadPtr < gSampleData.sampleLen) robert@464: gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) + robert@464: gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1)); robert@464: else robert@464: gInputAudio[2*n] = gInputAudio[2*n+1] = 0; robert@464: if(++gReadPtr >= gSampleData.sampleLen) robert@464: gReadPtr = 0; robert@464: } robert@464: // ------------------------------------------------------------------- robert@464: robert@464: for(int n = 0; n < numAudioFrames; n++) { robert@464: gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5); robert@464: robert@464: // Copy output buffer to output robert@464: for(int channel = 0; channel < numAudioChannels; channel++){ robert@464: audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel]; robert@464: } robert@464: robert@464: // Clear the output sample in the buffer so it is ready for the next overlap-add robert@464: gOutputBuffer[gOutputBufferReadPointer] = 0; robert@464: gOutputBufferReadPointer++; robert@464: if(gOutputBufferReadPointer >= BUFFER_SIZE) robert@464: gOutputBufferReadPointer = 0; robert@464: gOutputBufferWritePointer++; robert@464: if(gOutputBufferWritePointer >= BUFFER_SIZE) robert@464: gOutputBufferWritePointer = 0; robert@464: robert@464: gInputBufferPointer++; robert@464: if(gInputBufferPointer >= BUFFER_SIZE) robert@464: 
gInputBufferPointer = 0; robert@464: robert@464: gSampleCount++; robert@464: if(gSampleCount >= gHopSize) { robert@464: //process_fft(gInputBuffer, gInputBufferPointer, gOutputBuffer, gOutputBufferPointer); robert@464: gFFTInputBufferPointer = gInputBufferPointer; robert@464: gFFTOutputBufferPointer = gOutputBufferWritePointer; robert@464: Bela_scheduleAuxiliaryTask(gFFTTask); robert@464: robert@464: gSampleCount = 0; robert@464: } robert@464: } robert@464: gHopSize = gPeriod; robert@464: } robert@464: robert@464: // cleanup_render() is called once at the end, after the audio has stopped. robert@464: // Release any resources that were allocated in initialise_render(). robert@464: robert@464: void cleanup(BelaContext* context, void* userData) robert@464: { robert@464: NE10_FREE(timeDomainIn); robert@464: NE10_FREE(timeDomainOut); robert@464: NE10_FREE(frequencyDomain); robert@464: NE10_FREE(cfg); robert@464: free(gInputAudio); robert@464: free(gWindowBuffer); robert@464: } robert@464: robert@464: robert@464: /** robert@500: \example FFT-phase-vocoder/render.cpp robert@464: robert@464: Phase Vocoder robert@464: ---------------------- robert@464: robert@464: This sketch shows an implementation of a phase vocoder and builds on the previous FFT example. robert@464: Again it uses the NE10 library, included at the top of the file. robert@464: robert@464: Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html). robert@464: */