diff examples/basic_FFT_phase_vocoder/render.cpp @ 375:768acdeea362 prerelease
Merge
| field    | value                             |
|----------|-----------------------------------|
| author   | Giulio Moro <giuliomoro@yahoo.it> |
| date     | Fri, 10 Jun 2016 00:35:18 +0100   |
| parents  | 3bed6b09223c                      |
| children | 24c3a0663d54                      |
```diff
--- a/examples/basic_FFT_phase_vocoder/render.cpp	Fri Jun 10 00:02:48 2016 +0100
+++ b/examples/basic_FFT_phase_vocoder/render.cpp	Fri Jun 10 00:35:18 2016 +0100
@@ -1,3 +1,12 @@
+/*
+ ____  _____ _        _
+| __ )| ____| |      / \
+|  _ \|  _| | |     / _ \
+| |_) | |___| |___ / ___ \
+|____/|_____|_____/_/   \_\.io
+
+ */
+
 /*
  * render.cpp
  *
@@ -5,6 +14,17 @@
  *  Author: parallels
  */
 
+/**
+\example 4_audio_FFT_phase_vocoder
+
+Phase Vocoder
+----------------------
+
+This sketch shows an implementation of a phase vocoder and builds on the previous FFT example.
+Again it uses the NE10 library, included at the top of the file (line 11).
+
+Read the documentation on the NE10 library [here](http://projectne10.github.io/Ne10/doc/annotated.html).
+*/
 #include <Bela.h>
 #include <rtdk.h>
@@ -60,7 +80,10 @@
 float gDryWet = 1; // mix between the unprocessed and processed sound
 float gPlaybackLive = 0.5f; // mix between the file playback and the live audio input
 float gGain = 1; // overall gain
+float *gInputAudio = NULL;
 Midi midi;
+
+
 void midiCallback(MidiChannelMessage message, void* arg){
 	if(message.getType() == kmmNoteOn){
 		if(message.getDataByte(1) > 0){
@@ -113,11 +136,16 @@
 	timeDomainIn = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
 	timeDomainOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
 	frequencyDomain = (ne10_fft_cpx_float32_t*) NE10_MALLOC (gFFTSize * sizeof (ne10_fft_cpx_float32_t));
-	cfg = ne10_fft_alloc_c2c_float32 (gFFTSize);
+	cfg = ne10_fft_alloc_c2c_float32_neon (gFFTSize);
 
 	memset(timeDomainOut, 0, gFFTSize * sizeof (ne10_fft_cpx_float32_t));
 	memset(gOutputBuffer, 0, BUFFER_SIZE * sizeof(float));
 
+	// Allocate buffer to mirror and modify the input
+	gInputAudio = (float *)malloc(context->audioFrames * context->audioChannels * sizeof(float));
+	if(gInputAudio == 0)
+		return false;
+
 	// Allocate the window buffer based on the FFT size
 	gWindowBuffer = (float *)malloc(gFFTSize * sizeof(float));
 	if(gWindowBuffer == 0)
@@ -157,7 +185,7 @@
 	}
 
 	// Run the FFT
-	ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg->twiddles, cfg->factors, gFFTSize, 0);
+	ne10_fft_c2c_1d_float32_neon (frequencyDomain, timeDomainIn, cfg, 0);
 
 	switch (gEffect){
 		case kRobot :
@@ -182,7 +210,7 @@
 	}
 
 	// Run the inverse FFT
-	ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg->twiddles, cfg->factors, gFFTSize, 1);
+	ne10_fft_c2c_1d_float32_neon (timeDomainOut, frequencyDomain, cfg, 1);
 	// Overlap-and-add timeDomainOut into the output buffer
 	pointer = outWritePointer;
 	for(int n = 0; n < gFFTSize; n++) {
```
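Two API-level updates stand out in the hunks above: the FFT configuration is now allocated with `ne10_fft_alloc_c2c_float32_neon()`, and both transform call sites pass the whole `cfg` handle instead of `cfg->twiddles`, `cfg->factors`, and the size separately. A minimal standalone sketch of that call pattern, assuming a recent NE10 with the `_neon` variants (the `fftRoundTrip` wrapper and size `N` are illustrative names, not part of render.cpp):

```cpp
// Standalone sketch of the updated NE10 call pattern: one cfg handle
// allocated up front, then passed whole to the forward and inverse
// transforms. Not part of render.cpp; N and fftRoundTrip are illustrative.
#include <NE10.h>
#include <string.h>

bool fftRoundTrip()
{
	const int N = 1024; // analogous to gFFTSize in the example

	// The _neon allocator builds the twiddle/factor tables internally,
	// so callers no longer hand cfg->twiddles and cfg->factors to each call.
	ne10_fft_cfg_float32_t cfg = ne10_fft_alloc_c2c_float32_neon(N);
	if(cfg == NULL)
		return false;

	ne10_fft_cpx_float32_t* timeIn  = (ne10_fft_cpx_float32_t*) NE10_MALLOC(N * sizeof(ne10_fft_cpx_float32_t));
	ne10_fft_cpx_float32_t* freq    = (ne10_fft_cpx_float32_t*) NE10_MALLOC(N * sizeof(ne10_fft_cpx_float32_t));
	ne10_fft_cpx_float32_t* timeOut = (ne10_fft_cpx_float32_t*) NE10_MALLOC(N * sizeof(ne10_fft_cpx_float32_t));
	memset(timeIn, 0, N * sizeof(ne10_fft_cpx_float32_t));

	// Forward transform (last argument 0), then inverse (last argument 1):
	// the same two call sites this changeset updates.
	ne10_fft_c2c_1d_float32_neon(freq, timeIn, cfg, 0);
	ne10_fft_c2c_1d_float32_neon(timeOut, freq, cfg, 1);

	NE10_FREE(timeIn);
	NE10_FREE(freq);
	NE10_FREE(timeOut);
	NE10_FREE(cfg);
	return true;
}
```

Keeping the tables inside the opaque `cfg` handle means setup cost is paid once in `setup()` and the per-block transform calls in `process_fft()` stay cheap, which matters on a real-time audio thread.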
```diff
@@ -206,7 +234,6 @@
 // will be 0.
 void render(BelaContext* context, void* userData)
 {
-	float* audioIn = context->audioIn;
 	float* audioOut = context->audioOut;
 	int numAudioFrames = context->audioFrames;
 	int numAudioChannels = context->audioChannels;
@@ -215,21 +242,21 @@
 	// Prep the "input" to be the sound file played in a loop
 	for(int n = 0; n < numAudioFrames; n++) {
 		if(gReadPtr < gSampleData.sampleLen)
-			audioIn[2*n] = audioIn[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
+			gInputAudio[2*n] = gInputAudio[2*n+1] = gSampleData.samples[gReadPtr]*(1-gPlaybackLive) +
 					gPlaybackLive*0.5f*(audioRead(context,n,0)+audioRead(context,n,1));
 		else
-			audioIn[2*n] = audioIn[2*n+1] = 0;
+			gInputAudio[2*n] = gInputAudio[2*n+1] = 0;
 		if(++gReadPtr >= gSampleData.sampleLen)
 			gReadPtr = 0;
 	}
 	// -------------------------------------------------------------------
 
 	for(int n = 0; n < numAudioFrames; n++) {
-		gInputBuffer[gInputBufferPointer] = ((audioIn[n*numAudioChannels] + audioIn[n*numAudioChannels+1]) * 0.5);
+		gInputBuffer[gInputBufferPointer] = ((gInputAudio[n*numAudioChannels] + gInputAudio[n*numAudioChannels+1]) * 0.5);
 
 		// Copy output buffer to output
 		for(int channel = 0; channel < numAudioChannels; channel++){
-			audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * audioIn[n * numAudioChannels + channel];
+			audioOut[n * numAudioChannels + channel] = gOutputBuffer[gOutputBufferReadPointer] * gGain * gDryWet + (1 - gDryWet) * gInputAudio[n * numAudioChannels + channel];
 		}
 
 		// Clear the output sample in the buffer so it is ready for the next overlap-add
@@ -267,5 +294,6 @@
 	NE10_FREE(timeDomainOut);
 	NE10_FREE(frequencyDomain);
 	NE10_FREE(cfg);
+	free(gInputAudio);
 	free(gWindowBuffer);
 }
```
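The other half of the change stops writing the looped sample playback into `context->audioIn` and instead mirrors it into the heap-allocated `gInputAudio` buffer (allocated in `setup()`, freed in `cleanup()`), presumably so Bela's input buffer is left untouched. The output line in `render()` is then a per-sample dry/wet crossfade; a minimal sketch of just that stage, with hypothetical names (`mixDryWet`, `in`, `processed`, `out`) for illustration:

```cpp
// Sketch of the dry/wet output stage seen in render() above.
// mixDryWet, in, processed, and out are hypothetical names.
void mixDryWet(const float* in, const float* processed, float* out,
		int frames, int channels, float gain, float dryWet)
{
	for(int n = 0; n < frames; n++) {
		for(int ch = 0; ch < channels; ch++) {
			int i = n * channels + ch;
			// dryWet = 1: fully processed (scaled by gain);
			// dryWet = 0: the input passes straight through.
			out[i] = processed[i] * gain * dryWet + (1.0f - dryWet) * in[i];
		}
	}
}
```

With the default `gDryWet = 1` the listener hears only the phase-vocoded signal scaled by `gGain`; lowering it blends the unprocessed input back in, which is why the dry path needs a readable copy of the input in the first place.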