// Includes reconstructed from the classes and functions used below;
// the original include list was not preserved in this copy of the file.
#include <BeagleRT.h>
#include <I2c_Codec.h>
#include <NetworkSend.h>
#include <ReceiveAudioThread.h>
#include <ClockSynchronizer.h>
#include <ClockSyncThread.h>
#include <VirtualClock.h>
#include <cmath>

float gPhase1, gPhase2;
float gFrequency1, gFrequency2;
float gInverseSampleRate;

//Scope scope(2); //create a scope object with 2 channels
//NetworkSend networkSend;
//ReceiveAudioThread receiveAudio0;
//ReceiveAudioThread receiveAudio1;
ClockSynchronizer clockSynchronizer;
extern I2c_Codec* gAudioCodec;
VirtualClock virtualClock;
ClockSyncThread clockSyncThread;

// setup() is called once before the audio rendering starts.
// Use it to perform any initialisation and allocation which is dependent
// on the period size or sample rate.
//
// userData holds an opaque pointer to a data structure that was passed
// in from the call to initAudio().
//
// Return true on success; returning false halts the program.
bool setup(BeagleRTContext *context, void *userData)
{
//	receiveAudio0.init(10000, context->audioFrames, 0);
//	receiveAudio1.init(10000, context->audioFrames, 1);

//	scope.setup(); //call this once in setup to initialise the scope
//	scope.setPort(0, 9999);
//	scope.setPort(1, 10000);
//	networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");
	clockSynchronizer.setup();
	virtualClock.init();
	clockSyncThread.init(true, 5000, virtualClock); // start as slave
	gInverseSampleRate = 1.0 / context->audioSampleRate;

	gPhase1 = 0.0;
	gPhase2 = 0.0;

	gFrequency1 = 200.0;
	gFrequency2 = 201.0;

	return true;
}
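// A minimal sketch (not part of the original file) of the phase-accumulator
// oscillator pattern used in render() below: the phase advances by
// 2*pi*frequency/sampleRate per sample and is wrapped back into [0, 2*pi)
// so it never grows without bound. The helper name is illustrative only.
static inline float advancePhase(float phase, float frequency, float inverseSampleRate)
{
	phase += 2.0f * (float)M_PI * frequency * inverseSampleRate;
	if(phase >= 2.0f * (float)M_PI)
		phase -= 2.0f * (float)M_PI;
	return phase;
}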
// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available,
// context->analogFrames will be 0.

void render(BeagleRTContext *context, void *userData)
{
	virtualClock.sync(context->audioFrames);
	static int count = 0;
	if(count == 0)
		clockSyncThread.startThread();
	static float phase = 0;
	float phaseInc = 200.0 / 44100.0 * 2 * M_PI;
//	rt_printf("phaseInc: %f, phase: %f\n", phaseInc, phase);
	for(unsigned int n = 0; n < context->audioFrames; n++){
		// 200 Hz sine test tone on both channels (originally sinf(phaseInc), which outputs a constant)
		context->audioOut[n*2] = sinf(phase); //context->audioIn[n*2];
		context->audioOut[n*2+1] = sinf(phase); //context->audioIn[n*2];
		phase += phaseInc;
		if(phase >= 2 * M_PI)
			phase -= 2 * M_PI;
//		context->audioOut[n*2+1]=rand()/(float)RAND_MAX;context->audioIn[n*2];
	}
	count++;
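	// The block below is disabled in this revision. It holds the earlier
	// network clock-synchronisation experiment: clockSynchronizer.update() is
	// called whenever (count & 2047) == 0 using timestamps from networkSend
	// and receiveAudio0, a sine sweep is streamed out through networkSend.log(),
	// a square-wave test signal is written to the audio outputs, and audio
	// received over the network is copied into the output buffer with
	// receiveAudio0.getSamplesSrc().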
	/*
//	if((count&262143)==0){
//		static int nextCall=160000;
	if((count & 2047) == 0){
//		rt_printf("b %d\n", count);
		clockSynchronizer.update(networkSend.getTimestamp(), receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
//		nextCall=count+100000;
//		rt_printf("a %d\n", count);
	}
//	if(count == nextCall){
//		clockSynchronizer.update(networkSend.getTimestamp(), receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
//	}
	if(count == 0){
		gAudioCodec->setAudioSamplingRate(44100);
		rt_printf("startThread\n");
		ReceiveAudioThread::startThread();
	}
	for(unsigned int n = 0; n < context->audioFrames; n++) {

		float chn0 = sinf(gPhase1);
//		float chn1 = sinf(gPhase2);

//		float chn2 = context->audioIn[n*2 + 0];
//		float chn3 = context->audioIn[n*2 + 1];

//		float chn4 = context->analogIn[(int)n/2*8 + 0];
//		float chn5 = context->analogIn[(int)n/2*8 + 1];
//		networkSend.log(context->audioIn[n]);
		networkSend.log(chn0);
//		scope.log(0, chn0);
//		scope.log(1, chn1);
//		scope.log(2, chn2);
//		scope.log(3, chn3);
//		scope.log(4, chn4);
//		scope.log(5, chn5);

		// scope.log(chn1, chn2, chn3, chn4, chn5, chn6);
		// call this once every audio frame;
		// it takes six or fewer floats as parameters, the first parameter becomes channel 1, etc.
		// To view, click the 'oscilloscope' button on the toolbar while BeagleRT is NOT running,
		// then click the big red button on the toolbar on that page.

		gPhase1 += 2.0 * M_PI * gFrequency1 * gInverseSampleRate * ((count & 65535) / 65535.0 + 1);
		gPhase2 += 2.0 * M_PI * gFrequency2 * gInverseSampleRate;
		if(gPhase1 > 2.0 * M_PI)
			gPhase1 -= 2.0 * M_PI;
		if(gPhase2 > 2.0 * M_PI)
			gPhase2 -= 2.0 * M_PI;
		int value = count % 1000;
		context->audioOut[n*2] = value >= 500 ? 1 : -1;
		context->audioOut[n*2+1] = context->audioOut[n*2];
		count++;
	}
	if(count > 0){
		float samplingRateRatio = 1;
		int channelsInDestinationBuffer = 2;
		int channelToWriteTo = 1;
		int length = receiveAudio0.getSamplesSrc(context->audioOut, context->audioFrames,
				samplingRateRatio, channelsInDestinationBuffer, channelToWriteTo);
		if((unsigned int)length != context->audioFrames){
			rt_printf("Length mismatch: %d\n", length);
		}
//		int readPointer1=receiveAudio1.getSamplesSrc(context->audioOut, context->audioFrames, 1, 2, 1);
	}
	for(unsigned int n = 0; n < context->audioFrames; n++){
//		context->audioOut[n*2+1]=context->audioOut[n*2];
	}
	*/

}

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().

void cleanup(BeagleRTContext *context, void *userData)
{

}
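// A minimal sketch (not part of the original file) of the square-wave test
// signal generated in the disabled block above: count advances once per frame,
// so the output toggles every 500 frames, i.e. a square wave at
// sampleRate / 1000 Hz (44.1 Hz at a 44.1 kHz sample rate). The helper name
// is illustrative only.
static inline float squareTestSignal(int count)
{
	return (count % 1000) >= 500 ? 1.0f : -1.0f;
}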