view projects/basic_network/render.cpp @ 190:3b8a28edae41
- Updated heavy scripts to work with custom header
- Added -mfpu=neon flag to Makefile for custom .C files
author    chnrx <chris.heinrichs@gmail.com>
date      Wed, 27 Jan 2016 19:13:57 +0000
parents   a55dcdcebdcd
children  3068421c0737 dbff109f64c2
/*
 * render.cpp
 *
 *  Created on: Oct 24, 2014
 *      Author: parallels
 */

#include <BeagleRT.h>
//#include <rtdk.h>
#include <cmath>
#include <UdpClient.h>
#include <Utilities.h>

AuxiliaryTask transmitReceiveDataTask;

#define NETWORK_AUDIO_BUFFER_SIZE 400 //1400/4 //maximum payload for a UDP datagram over ethernet is 1472 bytes, I leave some headroom and divide by 4 to get the number of floats

struct networkAudio{
	int timestamp;
	int currentBuffer;
	int index;
	float buffers[2][NETWORK_AUDIO_BUFFER_SIZE];
	int doneOnTime;
	bool toBeSent;
	UdpClient udpClient;
};

float gFrequency;
float gPhase;
float gInverseSampleRate;

int gCount=0;
//networkData networkObject;
#define numNetAudio 3
networkAudio netAudio[numNetAudio];

AuxiliaryTask printIntervalTask;
AuxiliaryTask transmitReceiveAudioTask;

void transmitReceiveAudio(){ //transmit and receive audio buffers
	for(int n=0;n<numNetAudio; n++){
		if(netAudio[n].toBeSent){
			netAudio[n].toBeSent=false;
			netAudio[n].udpClient.send(netAudio[n].buffers[!netAudio[n].currentBuffer],NETWORK_AUDIO_BUFFER_SIZE*sizeof(float));
			netAudio[n].doneOnTime=1;
		}
	}
}

// setup() is called once before the audio rendering starts.
// Use it to perform any initialisation and allocation which is dependent
// on the period size or sample rate.
//
// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.

bool setup(BeagleRTContext *context, void *userData)
{
	// Retrieve a parameter passed in from the initAudio() call
	gFrequency = *(float *)userData;

	gInverseSampleRate = 1.0 / context->audioSampleRate;
	gPhase = 0.0;

//	networkObject.counter=&gCount;
//	networkObject.variables[0]=&gFrequency;
//	networkObject.variables[1]=&gPhase;
//	networkObject.numVariables=2;

	for(int n=0; n<numNetAudio; n++){
		netAudio[n].doneOnTime=1;
		netAudio[n].index=0;
		netAudio[n].currentBuffer=0;
		netAudio[n].toBeSent=false;
//		netAudio[n].udpClient.setPort(settings->transmitPort+n);
//		netAudio[n].udpClient.setServer(settings->serverName);
		netAudio[n].udpClient.setPort(9999+n);
		netAudio[n].udpClient.setServer("192.168.7.1");
	}
//	setupSockets(settings->receivePort, settings->transmitPort, settings->serverName);

//	transmitReceiveDataTask=createAuxiliaryTask(*transmitReceiveData, 10, "transmit-receive-data");
//	scheduleAuxiliaryTask(transmitReceiveDataTask); //here it does not work
	transmitReceiveAudioTask=BeagleRT_createAuxiliaryTask(*transmitReceiveAudio, 98, "transmit-receive-audio");

	return true;
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, numMatrixFrames
// will be 0.
void render(BeagleRTContext *context, void *userData)
{
/*
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float out = 0.7f * sinf(gPhase);
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

		for(unsigned int channel = 0; channel < context->audioChannels; channel++)
			context->audioOut[n * context->audioChannels + channel] = out;

		if(gCount == 0){
			BeagleRT_scheduleAuxiliaryTask(transmitReceiveDataTask);
		}
		gCount++;
	}
*/
	for(int n = 0; n < context->audioFrames; n++) {
		float out = 0.7f * sinf(gPhase);
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

//		for(int channel = 0; channel < context->audioChannels; channel++)
//			context->audioOut[n * context->audioChannels + channel] = context->audioIn[n * context->audioChannels + 0]+context->audioIn[n * context->audioChannels + 1];
		context->audioOut[n * context->audioChannels] = context->audioIn[n*context->audioChannels+0];
		context->audioOut[n * context->audioChannels+1]=out;

		if(0==gCount){
//			scheduleAuxiliaryTask(transmitReceiveDataTask);
		}
		for(int j=0; j<numNetAudio; j++){
			if(netAudio[j].index==(NETWORK_AUDIO_BUFFER_SIZE)){ // when the buffer is ready ...
				netAudio[j].toBeSent=true;
				netAudio[j].index=0; //reset the counter
				if(netAudio[j].doneOnTime==0)
					rt_printf("Network buffer underrun :-{\n");
				netAudio[j].timestamp=gCount;
				netAudio[j].currentBuffer=!netAudio[j].currentBuffer; //switch buffer
				netAudio[j].doneOnTime=0;
				BeagleRT_scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer
			}
		}
		if((gCount&1)==0){
			netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=analogReadFrame(context,n/2,0)+context->audioOut[n*context->audioChannels + 0];
			netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=analogReadFrame(context,n/2,1)+context->audioOut[n*context->audioChannels + 0];
		}
		netAudio[0].buffers[netAudio[0].currentBuffer][netAudio[0].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);//copy channel 0 to the buffer
//		netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
//		netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
		gCount++;
	}
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BeagleRTContext *context, void *userData)
{

}
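For reference: setup() above points each UdpClient at the host 192.168.7.1 on ports 9999, 10000 and 10001, and transmitReceiveAudio() sends each filled buffer as one datagram of NETWORK_AUDIO_BUFFER_SIZE floats. The following is a minimal sketch of a host-side receiver for one of those streams; it is not part of this project, and the plain POSIX-socket approach and constant names are assumptions made for illustration.

// udp_receiver.cpp - hypothetical host-side listener (not part of the BeagleRT example)
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
	const int kPort = 9999;        // must match the port set in setup() (9999+n)
	const int kNumFloats = 400;    // must match NETWORK_AUDIO_BUFFER_SIZE

	// create a UDP socket and bind it to the chosen port on all interfaces
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	if(sock < 0) { perror("socket"); return 1; }

	sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(kPort);
	if(bind(sock, (sockaddr *)&addr, sizeof(addr)) < 0) { perror("bind"); return 1; }

	float buffer[kNumFloats];
	while(true) {
		// each datagram carries one whole buffer of kNumFloats samples
		ssize_t received = recvfrom(sock, buffer, sizeof(buffer), 0, nullptr, nullptr);
		if(received < 0) { perror("recvfrom"); break; }
		printf("received %zd bytes (%zd samples), first sample %f\n",
		       received, received / (ssize_t)sizeof(float), buffer[0]);
	}
	close(sock);
	return 0;
}

Listening on 10000 or 10001 instead would capture the streams built from analogReadFrame() in netAudio[1] and netAudio[2].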