diff projects/basic_network/render.cpp @ 108:3068421c0737 ultra-staging

Merged default into ultra-staging
author Giulio Moro <giuliomoro@yahoo.it>
date Tue, 18 Aug 2015 00:35:15 +0100
parents d3f869b98147 a55dcdcebdcd
children
--- a/projects/basic_network/render.cpp	Mon Jun 08 01:07:48 2015 +0100
+++ b/projects/basic_network/render.cpp	Tue Aug 18 00:35:15 2015 +0100
@@ -5,15 +5,13 @@
  *      Author: parallels
  */
 
-#include "../../include/RTAudioSettings.h"
-#include "../../include/render.h"
+#include <BeagleRT.h>
+//#include <rtdk.h>
 #include <cmath>
-//#include "../../include/client.h"
-#include "../../include/RTAudio.h"	// to schedule lower prio parallel process
-#include <rtdk.h>
-#include <native/timer.h>
-#include "../../include/PRU.h"
-#include "../../include/UdpClient.h"
+#include <UdpClient.h>
+#include <Utilities.h>
+
+AuxiliaryTask transmitReceiveDataTask;
 
 #define NETWORK_AUDIO_BUFFER_SIZE 400 //1400/4 //maximum payload for a UDP datagram over ethernet is 1472 bytes, I leave some headroom and divide by 4 to get the number of floats
 struct networkAudio{
@@ -33,18 +31,8 @@
 //networkData networkObject;
 #define numNetAudio 3
 networkAudio netAudio[numNetAudio];
-extern PRU *gPRU;
 AuxiliaryTask printIntervalTask;
 AuxiliaryTask transmitReceiveAudioTask;
-void transmitReceiveData(){ //transmit and receive asynchronous messages
-//   	printf("transmitReceiveData auxiliary task has started\n");
-//	while(!gShouldStop){
-//		sendMessage(&networkObject);
-//		receiveMessage(networkObject);
-//		usleep(1000);
-//	}
-//	closeSockets();
-}
 
 void transmitReceiveAudio(){ //transmit and receive audio buffers
 	for(int n=0;n<numNetAudio; n++){
@@ -56,8 +44,7 @@
 	}
 }
 
-
-// initialise_render() is called once before the audio rendering starts.
+// setup() is called once before the audio rendering starts.
 // Use it to perform any initialisation and allocation which is dependent
 // on the period size or sample rate.
 //
@@ -65,16 +52,12 @@
 // in from the call to initAudio().
 //
 // Return true on success; returning false halts the program.
-bool initialise_render(int numMatrixChannels, int numDigitalChannels, int numAudioChannels,
-					   int numMatrixFramesPerPeriod,
-					   int numAudioFramesPerPeriod,
-					   float matrixSampleRate, float audioSampleRate,
-					   void *userData, RTAudioSettings *settings)
+bool setup(BeagleRTContext *context, void *userData)
 {
 	// Retrieve a parameter passed in from the initAudio() call
 	gFrequency = *(float *)userData;
 
-	gInverseSampleRate = 1.0 / audioSampleRate;
+	gInverseSampleRate = 1.0 / context->audioSampleRate;
 	gPhase = 0.0;
 
 //	networkObject.counter=&gCount;
@@ -86,14 +69,16 @@
 		netAudio[n].index=0;
 		netAudio[n].currentBuffer=0;
 		netAudio[n].toBeSent=false;
-		netAudio[n].udpClient.setPort(settings->transmitPort+n);
-		netAudio[n].udpClient.setServer(settings->serverName);
+//		netAudio[n].udpClient.setPort(settings->transmitPort+n);
+//		netAudio[n].udpClient.setServer(settings->serverName);
+		netAudio[n].udpClient.setPort(9999+n);
+		netAudio[n].udpClient.setServer("192.168.7.1");
 	}
 //	setupSockets(settings->receivePort, settings->transmitPort, settings->serverName);
 
-//	transmitReceiveDataTask=createAuxiliaryTaskLoop(*transmitReceiveData, 10, "transmit-receive-data");
+//	transmitReceiveDataTask=createAuxiliaryTask(*transmitReceiveData, 10, "transmit-receive-data");
 //	scheduleAuxiliaryTask(transmitReceiveDataTask); //here it does not work
-	transmitReceiveAudioTask=createAuxiliaryTaskLoop(*transmitReceiveAudio, 98, "transmit-receive-audio");
+	transmitReceiveAudioTask=BeagleRT_createAuxiliaryTask(*transmitReceiveAudio, 98, "transmit-receive-audio");
 	return true;
 }
 
@@ -102,19 +87,35 @@
 // ADCs and DACs (if available). If only audio is available, numMatrixFrames
 // will be 0.
 
-void render(int numAnalogFrames, int numAudioFrames, int numDigitalFrames, float *audioIn, float *audioOut,
-		float *analogIn, float *analogOut, uint32_t *digital)
-{
-	for(int n = 0; n < numAudioFrames; n++) {
+void render(BeagleRTContext *context, void *userData)
+{/*
+	for(unsigned int n = 0; n < context->audioFrames; n++) {
 		float out = 0.7f * sinf(gPhase);
 		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
 		if(gPhase > 2.0 * M_PI)
 			gPhase -= 2.0 * M_PI;
 
-//		for(int channel = 0; channel < gNumAudioChannels; channel++)
-//			audioOut[n * gNumAudioChannels + channel] = audioIn[n * gNumAudioChannels + 0]+audioIn[n * gNumAudioChannels + 1];
-		audioOut[n * gNumAudioChannels] = audioIn[n*gNumAudioChannels+0];
-		audioOut[n * gNumAudioChannels+1]=out;
+		for(unsigned int channel = 0; channel < context->audioChannels; channel++)
+			context->audioOut[n * context->audioChannels + channel] = out;
+
+		if(gCount == 0){
+			BeagleRT_scheduleAuxiliaryTask(transmitReceiveDataTask);
+		}
+		gCount++;
+	}
+
+
+*/
+	for(int n = 0; n < context->audioFrames; n++) {
+		float out = 0.7f * sinf(gPhase);
+		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
+		if(gPhase > 2.0 * M_PI)
+			gPhase -= 2.0 * M_PI;
+
+//		for(int channel = 0; channel < context->audioChannels; channel++)
+//			context->audioOut[n * context->audioChannels + channel] = context->audioIn[n * context->audioChannels + 0]+context->audioIn[n * context->audioChannels + 1];
+		context->audioOut[n * context->audioChannels] = context->audioIn[n*context->audioChannels+0];
+		context->audioOut[n * context->audioChannels+1]=out;
 		if(0==gCount){
 //			scheduleAuxiliaryTask(transmitReceiveDataTask);
 		}
@@ -127,24 +128,23 @@
 				netAudio[j].timestamp=gCount;
 				netAudio[j].currentBuffer=!netAudio[j].currentBuffer; //switch buffer
 				netAudio[j].doneOnTime=0;
-				scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer
+				BeagleRT_scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer
 			}
 		}
 		if((gCount&1)==0){
-			netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=analogRead(0,n/2)+audioOut[n*gNumAudioChannels + 0];
-			netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=analogRead(1,n/2)+audioOut[n*gNumAudioChannels + 0];
+			netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=analogReadFrame(context,n/2,0)+context->audioOut[n*context->audioChannels + 0];
+			netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=analogReadFrame(context,n/2,1)+context->audioOut[n*context->audioChannels + 0];
 		}
-		netAudio[0].buffers[netAudio[0].currentBuffer][netAudio[0].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]);//copy channel 0 to the buffer
-//		netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]);
-//		netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]);
+		netAudio[0].buffers[netAudio[0].currentBuffer][netAudio[0].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);//copy channel 0 to the buffer
+//		netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
+//		netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
 		gCount++;
 	}
 }
 
-// cleanup_render() is called once at the end, after the audio has stopped.
-// Release any resources that were allocated in initialise_render().
+// cleanup() is called once at the end, after the audio has stopped.
+// Release any resources that were allocated in setup().
 
-void cleanup_render()
+void cleanup(BeagleRTContext *context, void *userData)
 {
-//	closeSockets();
 }
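
For reference, the hunks above amount to a port from the old initialise_render/render/cleanup_render signatures to the BeagleRT context API. Below is a minimal sketch written directly against the new API as it appears in this diff (setup/render/cleanup taking a BeagleRTContext, BeagleRT_createAuxiliaryTask, BeagleRT_scheduleAuxiliaryTask, UdpClient::setPort/setServer). It is not the project's render.cpp: the NetworkAudio/transmitAudio names, the port and IP values, and the single double-buffered stream are illustrative only, and the UDP send call itself is left as a comment because its exact API is not visible in these hunks.

#include <BeagleRT.h>
#include <UdpClient.h>
#include <Utilities.h>
#include <cmath>

#define NET_BUFFER_SIZE 400     // floats per datagram, comfortably under the ~1472-byte UDP payload limit
#define REMOTE_PORT 9999        // placeholder, mirrors the hard-coded port in the diff
#define REMOTE_IP "192.168.7.1" // placeholder, mirrors the hard-coded host in the diff

struct NetworkAudio {
	float buffers[2][NET_BUFFER_SIZE]; // double buffer: fill one while the other is in flight
	int index;                         // write position in the buffer being filled
	int currentBuffer;                 // which of the two buffers render() is filling
	bool toBeSent;                     // set by render(), cleared by the auxiliary task
	UdpClient udpClient;
};

static NetworkAudio gNetAudio;
static AuxiliaryTask gTransmitTask;
static float gPhase = 0;
static float gFrequency = 440.0;
static float gInverseSampleRate;

// Runs at lower priority than render(): here it would hand the completed
// buffer to gNetAudio.udpClient; the actual send call is omitted on purpose.
void transmitAudio()
{
	if(gNetAudio.toBeSent) {
		// ... send buffers[!currentBuffer] over UDP here ...
		gNetAudio.toBeSent = false;
	}
}

bool setup(BeagleRTContext *context, void *userData)
{
	gInverseSampleRate = 1.0 / context->audioSampleRate;

	gNetAudio.index = 0;
	gNetAudio.currentBuffer = 0;
	gNetAudio.toBeSent = false;
	gNetAudio.udpClient.setPort(REMOTE_PORT);
	gNetAudio.udpClient.setServer(REMOTE_IP);

	// Same call shape as in the diff: function pointer, priority, task name.
	gTransmitTask = BeagleRT_createAuxiliaryTask(*transmitAudio, 98, "transmit-audio");
	return true;
}

void render(BeagleRTContext *context, void *userData)
{
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float out = 0.7f * sinf(gPhase);
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

		for(unsigned int channel = 0; channel < context->audioChannels; channel++)
			context->audioOut[n * context->audioChannels + channel] = out;

		// Copy the output into the network buffer; when it fills up, swap
		// buffers and wake the auxiliary task to transmit the other one.
		gNetAudio.buffers[gNetAudio.currentBuffer][gNetAudio.index++] = out;
		if(gNetAudio.index == NET_BUFFER_SIZE) {
			gNetAudio.index = 0;
			gNetAudio.currentBuffer = !gNetAudio.currentBuffer;
			gNetAudio.toBeSent = true;
			BeagleRT_scheduleAuxiliaryTask(gTransmitTask);
		}
	}
}

void cleanup(BeagleRTContext *context, void *userData)
{
}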