diff projects/scope/render.cpp @ 135:e77e2e712fbc ClockSync

To work with the ClockSync plugin
author Giulio Moro <giuliomoro@yahoo.it>
date Sat, 12 Sep 2015 20:05:55 +0100
parents 04b1678614c9
children 44d07fa9bd03
--- a/projects/scope/render.cpp	Thu Aug 27 03:33:32 2015 +0100
+++ b/projects/scope/render.cpp	Sat Sep 12 20:05:55 2015 +0100
@@ -3,13 +3,14 @@
 #include <ReceiveAudioThread.h>
 #include <ClockSynchronizer.h>
 #include <cmath>
+#include <ClockSyncThread.h>
 
 float gPhase1, gPhase2;
 float gFrequency1, gFrequency2;
 float gInverseSampleRate;
 
 //Scope scope(2);   //create a scope object with 2 channels
-NetworkSend networkSend;
+//NetworkSend networkSend;
 
 // initialise_render() is called once before the audio rendering starts.
 // Use it to perform any initialisation and allocation which is dependent
@@ -19,20 +20,24 @@
 // in from the call to initAudio().
 //
 // Return true on success; returning false halts the program.
-ReceiveAudioThread receiveAudio0;
+//ReceiveAudioThread receiveAudio0;
 //ReceiveAudioThread receiveAudio1;
 ClockSynchronizer clockSynchronizer;
 extern I2c_Codec* gAudioCodec;
+VirtualClock virtualClock;
+ClockSyncThread clockSyncThread;
 bool setup(BeagleRTContext *context, void *userData)
 {
-	receiveAudio0.init(10000, context->audioFrames, 0);
+//	receiveAudio0.init(10000, context->audioFrames, 0);
 //	receiveAudio1.init(10000, context->audioFrames, 1);
 
 //	scope.setup();  //call this once in setup to initialise the scope
 //	scope.setPort(0, 9999);
 //	scope.setPort(1, 10000);
-	networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");
+//	networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");
 	clockSynchronizer.setup();
+	virtualClock.init();
+	clockSyncThread.init(true, 5000, virtualClock); //start as slave
 	gInverseSampleRate = 1.0/context->audioSampleRate;
 	
 	gPhase1 = 0.0;
@@ -51,10 +56,26 @@
 
 void render(BeagleRTContext *context, void *userData)
 {
+	virtualClock.sync(context->audioFrames);
 	static int count=0;
+	if(count==0)
+		clockSyncThread.startThread();
+	static float phase=0;
+	float phaseInc=200.0/44100.0*2*M_PI;
+//	rt_printf("phaseInc: %f, phase: %f\n",phaseInc,phase);
+	for(unsigned int n=0; n<context->audioFrames; n++){
+		context->audioOut[n*2]=sinf(phaseInc);//context->audioIn[n*2];
+		context->audioOut[n*2+1]=sinf(phaseInc);//context->audioIn[n*2];
+		phase+=200.0/44100.0*2*M_PI;
+		if(phase>=2*M_PI)
+			phase-=2*M_PI;
+//		context->audioOut[n*2+1]=rand()/(float)RAND_MAX;context->audioIn[n*2];
+	}
+	count++;
+	/*
 //	if((count&262143)==0){
 //	static int nextCall=160000;
-	if( ((count&(2047-1))==0 /*&& count>200000*/)){
+	if( ((count&(2047))==0)){
 //		rt_printf("b %d\n", count);
 		clockSynchronizer.update(networkSend.getTimestamp(), receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
 //		nextCall=count+100000;
@@ -64,7 +85,7 @@
 //		clockSynchronizer.update(networkSend.getTimestamp(), receiveAudio0.getTimestamp(), receiveAudio0.getLastTime());
 //	}
 	if(count==0){
-		gAudioCodec->setAudioSamplingRate( 44101);
+		gAudioCodec->setAudioSamplingRate( 44100);
 		rt_printf("startHread\n");
 		ReceiveAudioThread::startThread();
 	}
@@ -100,12 +121,15 @@
 			gPhase1 -= 2.0 * M_PI;
 		if(gPhase2 > 2.0 * M_PI)
 			gPhase2 -= 2.0 * M_PI;
+		int value=count%1000;
+		context->audioOut[n*2]=value>=500 ? 1 : -1;
+		context->audioOut[n*2+1]=context->audioOut[n*2];
 		count++;
 	}
 	if(count>0){
 		float samplingRateRatio=1;
 		int channelsInDestinationBuffer=2;
-		int channelToWriteTo=0;
+		int channelToWriteTo=1;
 		int length=receiveAudio0.getSamplesSrc(context->audioOut, context->audioFrames,
 				samplingRateRatio, channelsInDestinationBuffer, channelToWriteTo);
 		if((unsigned int)length!=context->audioFrames){
@@ -114,8 +138,10 @@
 //		int readPointer1=receiveAudio1.getSamplesSrc(context->audioOut, context->audioFrames, 1, 2, 1);
 	}
 	for(unsigned int n=0; n<context->audioFrames; n++){
-		context->audioOut[n*2+1]=context->audioOut[n*2];
+//		context->audioOut[n*2+1]=context->audioOut[n*2];
 	}
+	*/
+
 }
 
 // cleanup_render() is called once at the end, after the audio has stopped.
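Note on the test-tone loop added in render(): the patch accumulates phase each frame but passes phaseInc to sinf(), so every sample gets the same constant value instead of a 200 Hz tone. The intended per-sample increment is 2*pi*f/fs = 2*pi*200/44100, roughly 0.0285 rad per frame. Below is a minimal corrected sketch of that loop only; the 200 Hz frequency, 44.1 kHz rate, and interleaved stereo layout are taken from the patch, while the standalone function name and signature are assumptions for illustration, not code from this changeset.

	#include <cmath>

	// Sketch (assumption): corrected 200 Hz test tone, factored out of render()
	// for clarity. Uses the running phase, not the increment, as the sinf() argument.
	void writeTestTone(float* audioOut, unsigned int audioFrames)
	{
		static float phase = 0.0f;
		const float phaseInc = 200.0f / 44100.0f * 2.0f * (float)M_PI; // ~0.0285 rad per frame
		for(unsigned int n = 0; n < audioFrames; n++){
			float sample = sinf(phase);      // evaluate the sine at the accumulated phase
			audioOut[n * 2] = sample;        // left channel (interleaved stereo)
			audioOut[n * 2 + 1] = sample;    // right channel
			phase += phaseInc;
			if(phase >= 2.0f * (float)M_PI)  // wrap to keep the phase bounded
				phase -= 2.0f * (float)M_PI;
		}
	}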