/*
 ____  _____ _        _
| __ )| ____| |      / \
|  _ \|  _| | |     / _ \
| |_) | |___| |___ / ___ \
|____/|_____|_____/_/   \_\.io

*/

/*
 * render.cpp
 *
 *  Created on: Oct 24, 2014
 *      Author: parallels
 */

/**
\example 1_basic_helloworld

Producing your first bleep!
---------------------------

This sketch is the hello world of embedded interactive audio. Better known as bleep, it
produces a sine tone.

The frequency of the sine tone is determined by a global variable, `gFrequency`,
declared at the top of this file. The sine tone is produced by incrementing the phase
of a sine function on every audio frame.

In render() you'll see a nested for loop structure. You'll see this in all Bela projects.
The first for loop cycles through `audioFrames`, the second through `audioChannels`
(in this case left 0 and right 1). It is good to familiarise yourself with this structure
as it's fundamental to producing sound with the system.
*/

#include <Bela.h>
#include <cmath>

float gFrequency = 440.0;
float gPhase;
float gInverseSampleRate;

// setup() is called once before the audio rendering starts.
// Use it to perform any initialisation and allocation which is dependent
// on the period size or sample rate.
//
// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.

bool setup(BelaContext *context, void *userData)
{
	// Retrieve a parameter passed in from the initAudio() call
	if(userData != 0)
		gFrequency = *(float *)userData;

	gInverseSampleRate = 1.0 / context->audioSampleRate;
	gPhase = 0.0;

	return true;
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, the analog
// frame count will be 0.

void render(BelaContext *context, void *userData)
{
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		// Compute the next sample of the sine wave and advance the phase
		float out = 0.8f * sinf(gPhase);
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		// Wrap the phase to keep it within one period
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

		for(unsigned int channel = 0; channel < context->audioChannels; channel++) {
			// Two equivalent ways to write this code

			// The long way, using the buffers directly:
			// context->audioOut[n * context->audioChannels + channel] = out;

			// Or using the macros:
			audioWrite(context, n, channel, out);
		}
	}
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().
void cleanup(BelaContext *context, void *userData)
{

}