beaglert: projects/scope/render.cpp @ 135:e77e2e712fbc (branch ClockSync)
To work with the ClockSync plugin
author:   Giulio Moro <giuliomoro@yahoo.it>
date:     Sat, 12 Sep 2015 20:05:55 +0100
parents:  04b1678614c9
children: 44d07fa9bd03
#include <BeagleRT.h>
#include <NetworkSend.h>
#include <ReceiveAudioThread.h>
#include <ClockSynchronizer.h>
#include <cmath>
#include <ClockSyncThread.h>

float gPhase1, gPhase2;
float gFrequency1, gFrequency2;
float gInverseSampleRate;

//Scope scope(2); //create a scope object with 2 channels
//NetworkSend networkSend;

// setup() is called once before the audio rendering starts.
// Use it to perform any initialisation and allocation which is dependent
// on the period size or sample rate.
//
// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.

//ReceiveAudioThread receiveAudio0;
//ReceiveAudioThread receiveAudio1;
ClockSynchronizer clockSynchronizer;
extern I2c_Codec* gAudioCodec;
VirtualClock virtualClock;
ClockSyncThread clockSyncThread;

bool setup(BeagleRTContext *context, void *userData)
{
//	receiveAudio0.init(10000, context->audioFrames, 0);
//	receiveAudio1.init(10000, context->audioFrames, 1);

//	scope.setup(); //call this once in setup to initialise the scope
//	scope.setPort(0, 9999);
//	scope.setPort(1, 10000);
//	networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");

	clockSynchronizer.setup();
	virtualClock.init();
	clockSyncThread.init(true, 5000, virtualClock); // start as slave

	gInverseSampleRate = 1.0 / context->audioSampleRate;
	gPhase1 = 0.0;
	gPhase2 = 0.0;
	gFrequency1 = 200.0;
	gFrequency2 = 201.0;
	return true;
}

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, numMatrixFrames
// will be 0.

void render(BeagleRTContext *context, void *userData)
{
	virtualClock.sync(context->audioFrames); // advance the virtual clock by one audio block
	static int count = 0;
	if(count == 0)
		clockSyncThread.startThread(); // start the clock sync thread on the first callback
	static float phase = 0;
	float phaseInc = 200.0 / 44100.0 * 2 * M_PI;
//	rt_printf("phaseInc: %f, phase: %f\n", phaseInc, phase);
	for(unsigned int n = 0; n < context->audioFrames; n++){
		context->audioOut[n*2]   = sinf(phase); //context->audioIn[n*2];
		context->audioOut[n*2+1] = sinf(phase); //context->audioIn[n*2];
		phase += phaseInc;
		if(phase >= 2 * M_PI)
			phase -= 2 * M_PI;
//		context->audioOut[n*2+1] = rand()/(float)RAND_MAX; context->audioIn[n*2];
	}
	count++;
/*
//	if((count&262143)==0){
//	static int nextCall=160000;
	if((count&2047) == 0){
//		rt_printf("b %d\n", count);
		clockSynchronizer.update(networkSend.getTimestamp(),
				receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
//		nextCall=count+100000;
//		rt_printf("a %d\n", count);
	}
//	if(count == nextCall){
//		clockSynchronizer.update(networkSend.getTimestamp(), receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
//	}
	if(count == 0){
		gAudioCodec->setAudioSamplingRate(44100);
		rt_printf("startThread\n");
		ReceiveAudioThread::startThread();
	}
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float chn0 = sinf(gPhase1);
//		float chn1 = sinf(gPhase2);
//		float chn2 = context->audioIn[n*2 + 0];
//		float chn3 = context->audioIn[n*2 + 1];
//		float chn4 = context->analogIn[(int)n/2*8 + 0];
//		float chn5 = context->analogIn[(int)n/2*8 + 1];
//		networkSend.log(context->audioIn[n]);
		networkSend.log(chn0);
//		scope.log(0, chn0);
//		scope.log(1, chn1);
//		scope.log(2, chn2);
//		scope.log(3, chn3);
//		scope.log(4, chn4);
//		scope.log(5, chn5);
//		scope.log(chn1, chn2, chn3, chn4, chn5, chn6);
		// call this once every audio frame
		// takes six or fewer floats as parameters
		// first parameter becomes channel 1 etc
		// to view, click the 'oscilloscope' button on the toolbar while BeagleRT is NOT running
		// then click the big red button on the toolbar on this page
		gPhase1 += 2.0 * M_PI * gFrequency1 * gInverseSampleRate * ((count&65535)/65535.0 + 1);
		gPhase2 += 2.0 * M_PI * gFrequency2 * gInverseSampleRate;
		if(gPhase1 > 2.0 * M_PI)
			gPhase1 -= 2.0 * M_PI;
		if(gPhase2 > 2.0 * M_PI)
			gPhase2 -= 2.0 * M_PI;
		int value = count % 1000;
		context->audioOut[n*2]   = value >= 500 ? 1 : -1; // square wave
		context->audioOut[n*2+1] = context->audioOut[n*2];
		count++;
	}
	if(count > 0){
		float samplingRateRatio = 1;
		int channelsInDestinationBuffer = 2;
		int channelToWriteTo = 1;
		int length = receiveAudio0.getSamplesSrc(context->audioOut, context->audioFrames,
				samplingRateRatio, channelsInDestinationBuffer, channelToWriteTo);
		if((unsigned int)length != context->audioFrames){
			rt_printf("Length mismatch: %d\n", length);
		}
//		int readPointer1 = receiveAudio1.getSamplesSrc(context->audioOut, context->audioFrames, 1, 2, 1);
	}
	for(unsigned int n = 0; n < context->audioFrames; n++){
//		context->audioOut[n*2+1] = context->audioOut[n*2];
	}
*/
}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BeagleRTContext *context, void *userData)
{

}
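The audio loop in render() is a phase-accumulator oscillator: a running phase is advanced by 2*pi*f/fs once per frame, wrapped back below 2*pi, and the output sample is the sine of that accumulated phase. The sketch below is a minimal, standalone illustration of the same pattern against a plain interleaved stereo float buffer; it is not part of the BeagleRT API, and the names (fillSineBlock, kSampleRate, kFrames) are illustrative assumptions.

```cpp
// Minimal sketch of a phase-accumulator sine oscillator writing into an
// interleaved stereo buffer. Names and buffer sizes are illustrative only.
#include <cmath>
#include <cstdio>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

const float kSampleRate = 44100.0f; // assumed sample rate
const float kFrequency  = 200.0f;   // tone frequency in Hz

// Fill 'frames' stereo frames of 'out' with a sine tone, carrying the
// phase across calls through the reference parameter.
void fillSineBlock(float* out, unsigned int frames, float& phase)
{
	const float phaseInc = 2.0f * (float)M_PI * kFrequency / kSampleRate;
	for(unsigned int n = 0; n < frames; ++n){
		float sample = sinf(phase);      // sine of the accumulated phase
		out[n * 2]     = sample;         // left channel
		out[n * 2 + 1] = sample;         // right channel
		phase += phaseInc;               // advance by 2*pi*f/fs per frame
		if(phase >= 2.0f * (float)M_PI)  // wrap to keep the phase bounded
			phase -= 2.0f * (float)M_PI;
	}
}

int main()
{
	const unsigned int kFrames = 16;     // one short block, standing in for context->audioFrames
	float buffer[kFrames * 2] = {0.0f};
	float phase = 0.0f;
	fillSineBlock(buffer, kFrames, phase);
	for(unsigned int n = 0; n < 4; ++n)
		printf("frame %u: L=%f R=%f\n", n, buffer[n * 2], buffer[n * 2 + 1]);
	return 0;
}
```

Carrying the phase across calls (here through a reference parameter, in render() through a static local) is what keeps the tone continuous from one audio block to the next.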