/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\.io

*/

/*
 * render.cpp
 *
 *  Created on: Oct 24, 2014
 *      Author: parallels
 */

/**
\example 4_audio_FFT_phase_vocoder

Phase Vocoder
----------------------

This sketch shows an implementation of a phase vocoder and builds on the previous FFT example.
Again it uses the NE10 library, included at the top of the file.

Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).
*/

#include <Bela.h>
#include <rtdk.h>
#include <NE10.h> // NEON FFT library
#include <cmath>
#include "SampleData.h"
#include <Midi.h>

#define BUFFER_SIZE 16384

// TODO: your buffer and counter go here!
float gInputBuffer[BUFFER_SIZE];
int gInputBufferPointer = 0;
float gOutputBuffer[BUFFER_SIZE];
int gOutputBufferWritePointer = 0;
int gOutputBufferReadPointer = 0;
int gSampleCount = 0;

float *gWindowBuffer;

// -----------------------------------------------
// These variables are used internally in the example:
int gFFTSize = 2048;
int gHopSize = 512;
int gPeriod = 512;
float gFFTScaleFactor = 0;

// FFT vars
ne10_fft_cpx_float32_t* timeDomainIn;
ne10_fft_cpx_float32_t* timeDomainOut;
ne10_fft_cpx_float32_t* frequencyDomain;
ne10_fft_cfg_float32_t cfg;

// Sample info
SampleData gSampleData; // User-defined structure to get complex data from main
int gReadPtr = 0;       // Position of the last sample read from the file

// Auxiliary task for calculating FFT
AuxiliaryTask gFFTTask;
int gFFTInputBufferPointer = 0;
int gFFTOutputBufferPointer = 0;

void process_fft_background();

int gEffect = 0; // change this here or with MIDI CC
enum {
	kBypass,
	kRobot,
	kWhisper,
};

float gDryWet = 1;          // mix between the unprocessed and processed sound
float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input
float gGain = 1;            // overall gain
float *gInputAudio = NULL;
Midi midi;
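/*
 * A note on the analysis parameters above: with gFFTSize = 2048 and the
 * default gHopSize = 512, successive analysis frames overlap by 75%, so
 * each input sample contributes to four windowed grains. With a Hann
 * window, this amount of overlap makes the overlap-added grains sum to
 * an approximately constant gain, which is why render() can simply
 * accumulate the inverse FFTs into gOutputBuffer. gFFTScaleFactor,
 * set to 1/gFFTSize in setup(), normalises the FFT/IFFT round trip.
 */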
void midiCallback(MidiChannelMessage message, void* arg){
	if(message.getType() == kmmNoteOn){
		if(message.getDataByte(1) > 0){
			int note = message.getDataByte(0);
			float frequency = powf(2, (note-69)/12.f)*440;
			gPeriod = (int)(44100 / frequency + 0.5);
			printf("\nnote: %d, frequency: %f, hop: %d\n", note, frequency, gPeriod);
		}
	}

	bool shouldPrint = false;
	if(message.getType() == kmmControlChange){
		float data = message.getDataByte(1) / 127.0f;
		switch (message.getDataByte(0)){
		case 2 :
			gEffect = (int)(data * 2 + 0.5); // CC2 selects an effect among 0, 1 and 2
			break;
		case 3 :
			gPlaybackLive = data;
			break;
		case 4 :
			gDryWet = data;
			break;
		case 5:
			gGain = data*10;
			break;
		default:
			shouldPrint = true;
		}
	}
	if(shouldPrint){
		message.prettyPrint();
	}
}

// userData holds an opaque pointer to a data structure that was passed
// in from the call to Bela_initAudio().
//
// Return true on success; returning false halts the program.
bool setup(BelaContext* context, void* userData)
{
	midi.readFrom(0);
	midi.setParserCallback(midiCallback);
	// Retrieve a parameter passed in from the Bela_initAudio() call
	gSampleData = *(SampleData *)userData;

	gFFTScaleFactor = 1.0f / (float)gFFTSize;
	gOutputBufferWritePointer += gHopSize;

	timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);

	memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));
	memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float));

	// Allocate a buffer to mirror and modify the input
	gInputAudio = (float *)malloc(context->audioFrames * context->audioChannels * sizeof(float));
	if(gInputAudio == 0)
		return false;

	// Allocate the window buffer based on the FFT size
	gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float));
	if(gWindowBuffer == 0)
		return false;

	// Calculate a Hann window
	for(int n = 0; n < gFFTSize; n++) {
		gWindowBuffer[n] = 0.5f * (1.0f - cosf(2.0 * M_PI * n / (float)(gFFTSize - 1)));
	}

	// Initialise the auxiliary task that computes the FFT
	if((gFFTTask = Bela_createAuxiliaryTask(&process_fft_background, 90, "fft-calculation")) == 0)
		return false;

	rt_printf("You are listening to an FFT phase-vocoder with overlap-and-add.\n"
			"Use MIDI Control Change messages to control:\n"
			"CC 2: effect type (bypass/robotization/whisperization)\n"
			"CC 3: mix between recorded sample and live audio input\n"
			"CC 4: mix between the unprocessed and processed sound\n"
			"CC 5: gain\n"
	);
	return true;
}
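/*
 * A worked example of the note-to-hop mapping in midiCallback() above:
 * MIDI note 57 (A3) gives 2^((57-69)/12) * 440 = 220 Hz, so
 * gPeriod = (int)(44100 / 220 + 0.5) = 200 samples. render() copies
 * gPeriod into gHopSize once per block, so playing notes changes the
 * spacing at which grains are overlap-added and hence the perceived
 * pitch of the robotised sound. Note that the conversion hard-codes a
 * 44.1 kHz sample rate; using context->audioSampleRate instead of 44100
 * would generalise it.
 */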
// This function handles the FFT processing in this example once the buffer has
// been assembled.
void process_fft(float *inBuffer, int inWritePointer, float *outBuffer, int outWritePointer)
{
	// Copy the most recent gFFTSize input samples into the FFT input,
	// applying the analysis window as we go
	int pointer = (inWritePointer - gFFTSize + BUFFER_SIZE) % BUFFER_SIZE;
	for(int n = 0; n < gFFTSize; n++) {
		timeDomainIn[n].r = (ne10_float32_t) inBuffer[pointer] * gWindowBuffer[n];
		timeDomainIn[n].i = 0;

		pointer++;
		if(pointer >= BUFFER_SIZE)
			pointer = 0;
	}

	// Run the FFT
	ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);

	switch (gEffect){
	case kRobot :
		// Robotise the output: keep the magnitude, set every phase to zero
		for(int n = 0; n < gFFTSize; n++) {
			float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
			frequencyDomain[n].r = amplitude;
			frequencyDomain[n].i = 0;
		}
		break;
	case kWhisper :
		// Whisperise the output: keep the magnitude, randomise the phase
		for(int n = 0; n < gFFTSize; n++) {
			float amplitude = sqrtf(frequencyDomain[n].r * frequencyDomain[n].r + frequencyDomain[n].i * frequencyDomain[n].i);
			float phase = rand()/(float)RAND_MAX * 2 * M_PI;
			frequencyDomain[n].r = cosf(phase) * amplitude;
			frequencyDomain[n].i = sinf(phase) * amplitude;
		}
		break;
	case kBypass:
		// Bypass: leave the spectrum untouched
		break;
	}

	// Run the inverse FFT
	ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);

	// Overlap-and-add timeDomainOut into the output buffer
	pointer = outWritePointer;
	for(int n = 0; n < gFFTSize; n++) {
		outBuffer[pointer] += (timeDomainOut[n].r) * gFFTScaleFactor;
		if(isnan(outBuffer[pointer]))
			rt_printf("outBuffer OLA\n"); // debug check for NaNs in the overlap-add
		pointer++;
		if(pointer >= BUFFER_SIZE)
			pointer = 0;
	}
}

// Function to process the FFT in a thread at lower priority
void process_fft_background() {
	process_fft(gInputBuffer, gFFTInputBufferPointer, gOutputBuffer, gFFTOutputBufferPointer);
}
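/*
 * Why these spectral manipulations work: each bin holds a complex value
 * that can be written as magnitude * e^(j*phase). Robotisation keeps the
 * magnitudes (and so the spectral envelope) but forces every phase to
 * zero, so every synthesised grain has identical phase; grains spaced
 * gHopSize apart then impose a constant pitch of roughly
 * audioSampleRate / gHopSize, which is why midiCallback() maps notes to
 * gPeriod. Whisperisation also keeps the magnitudes but replaces the
 * phases with random values, destroying the harmonic structure that
 * carries pitch while preserving the envelope that carries vowel colour.
 */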
// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, the analog
// frame and channel counts will be 0.
void render(BelaContext* context, void* userData)
{
	float* audioOut = context->audioOut;
	int numAudioFrames = context->audioFrames;
	int numAudioChannels = context->audioChannels;
	// ------ this code internal to the demo; leave as is ----------------

	// Prep the "input" to be the sound file played in a loop, mixed with
	// the live audio input (note: this block assumes 2 audio channels)
	for(int n = 0; n < numAudioFrames; n++) {
		if(gReadPtr < gSampleData.sampleLen)
			gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
					gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1));
		else
			gInputAudio[2*n] = gInputAudio[2*n+1] = 0;
		if(++gReadPtr >= gSampleData.sampleLen)
			gReadPtr = 0;
	}
	// -------------------------------------------------------------------

	for(int n = 0; n < numAudioFrames; n++) {
		// Mix the stereo input down to mono and store it for analysis
		gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5);

		// Copy the output buffer to the output, mixing wet and dry signals
		for(int channel = 0; channel < numAudioChannels; channel++){
			audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel];
		}

		// Clear the output sample in the buffer so it is ready for the next overlap-add
		gOutputBuffer[gOutputBufferReadPointer] = 0;
		gOutputBufferReadPointer++;
		if(gOutputBufferReadPointer >= BUFFER_SIZE)
			gOutputBufferReadPointer = 0;
		gOutputBufferWritePointer++;
		if(gOutputBufferWritePointer >= BUFFER_SIZE)
			gOutputBufferWritePointer = 0;

		gInputBufferPointer++;
		if(gInputBufferPointer >= BUFFER_SIZE)
			gInputBufferPointer = 0;

		gSampleCount++;
		if(gSampleCount >= gHopSize) {
			//process_fft(gInputBuffer, gInputBufferPointer, gOutputBuffer, gOutputBufferPointer);
			gFFTInputBufferPointer = gInputBufferPointer;
			gFFTOutputBufferPointer = gOutputBufferWritePointer;
			Bela_scheduleAuxiliaryTask(gFFTTask);

			gSampleCount = 0;
		}
	}
	gHopSize = gPeriod;
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().
void cleanup(BelaContext* context, void* userData)
{
	NE10_FREE(timeDomainIn);
	NE10_FREE(timeDomainOut);
	NE10_FREE(frequencyDomain);
	NE10_FREE(cfg);
	free(gInputAudio);
	free(gWindowBuffer);
}
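/*
 * A note on the buffer scheduling in render() above:
 * gOutputBufferWritePointer starts one hop (gHopSize samples) ahead of
 * gOutputBufferReadPointer (see setup()), and the two advance in
 * lockstep, so each grain is overlap-added ahead of the point currently
 * being read out. The auxiliary FFT task therefore has roughly one hop
 * period (512 samples, about 11.6 ms at 44.1 kHz) to finish before its
 * output is consumed; in exchange, the analysis/synthesis chain adds
 * latency on the order of the FFT length (2048 samples, about 46 ms).
 */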