comparison: projects/basic_network/render.cpp @ 235:3d41a6fa1830 (beaglert repository)

summary:  Merge
author:   Giulio Moro <giuliomoro@yahoo.it>
date:     Sun, 10 Apr 2016 04:08:06 +0200
parents:  6a23c07d0fbb
children: ac8eb07afcf5
--- 216:869f5e703844
+++ 235:3d41a6fa1830
@@ -6,141 +6,60 @@
  */
 
 #include <BeagleRT.h>
 //#include <rtdk.h>
 #include <cmath>
-#include <UdpClient.h>
+#include <NetworkSend.h>
+#include <ReceiveAudioThread.h>
 #include <Utilities.h>
-
-AuxiliaryTask transmitReceiveDataTask;
-
-#define NETWORK_AUDIO_BUFFER_SIZE 400 //1400/4 //maximum payload for a UDP datagram over ethernet is 1472 bytes, I leave some headroom and divide by 4 to get the number of floats
-struct networkAudio{
-    int timestamp;
-    int currentBuffer;
-    int index;
-    float buffers[2][NETWORK_AUDIO_BUFFER_SIZE];
-    int doneOnTime;
-    bool toBeSent;
-    UdpClient udpClient;
-};
-
-float gFrequency;
-float gPhase;
-float gInverseSampleRate;
-int gCount=0;
-//networkData networkObject;
-#define numNetAudio 3
-networkAudio netAudio[numNetAudio];
-AuxiliaryTask printIntervalTask;
-AuxiliaryTask transmitReceiveAudioTask;
-
-void transmitReceiveAudio(){ //transmit and receive audio buffers
-    for(int n=0;n<numNetAudio; n++){
-        if(netAudio[n].toBeSent){
-            netAudio[n].toBeSent=false;
-            netAudio[n].udpClient.send(netAudio[n].buffers[!netAudio[n].currentBuffer],NETWORK_AUDIO_BUFFER_SIZE*sizeof(float));
-            netAudio[n].doneOnTime=1;
-        }
-    }
-}
 
 // setup() is called once before the audio rendering starts.
 // Use it to perform any initialisation and allocation which is dependent
 // on the period size or sample rate.
 //
 // userData holds an opaque pointer to a data structure that was passed
 // in from the call to initAudio().
 //
 // Return true on success; returning false halts the program.
+
+NetworkSend networkSend;
+ReceiveAudioThread receive;
+float gFrequency;
+float gInverseSampleRate;
+float gPhase;
 bool setup(BeagleRTContext *context, void *userData)
 {
     // Retrieve a parameter passed in from the initAudio() call
     gFrequency = *(float *)userData;
 
+    networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");
+    receive.init(10000, context->audioFrames, 0);
+    receive.startThread();
     gInverseSampleRate = 1.0 / context->audioSampleRate;
-    gPhase = 0.0;
+    gPhase = 0;
-
-//  networkObject.counter=&gCount;
-//  networkObject.variables[0]=&gFrequency;
-//  networkObject.variables[1]=&gPhase;
-//  networkObject.numVariables=2;
-    for(int n=0; n<numNetAudio; n++){
-        netAudio[n].doneOnTime=1;
-        netAudio[n].index=0;
-        netAudio[n].currentBuffer=0;
-        netAudio[n].toBeSent=false;
-//      netAudio[n].udpClient.setPort(settings->transmitPort+n);
-//      netAudio[n].udpClient.setServer(settings->serverName);
-        netAudio[n].udpClient.setPort(9999+n);
-        netAudio[n].udpClient.setServer("192.168.7.1");
-    }
-//  setupSockets(settings->receivePort, settings->transmitPort, settings->serverName);
-
-//  transmitReceiveDataTask=createAuxiliaryTask(*transmitReceiveData, 10, "transmit-receive-data");
-//  scheduleAuxiliaryTask(transmitReceiveDataTask); //here it does not work
-    transmitReceiveAudioTask=BeagleRT_createAuxiliaryTask(*transmitReceiveAudio, 98, "transmit-receive-audio");
     return true;
 }
 
 // render() is called regularly at the highest priority by the audio engine.
 // Input and output are given from the audio hardware and the other
 // ADCs and DACs (if available). If only audio is available, numMatrixFrames
 // will be 0.
 
 void render(BeagleRTContext *context, void *userData)
-{/*
+{
     for(unsigned int n = 0; n < context->audioFrames; n++) {
         float out = 0.7f * sinf(gPhase);
         gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
         if(gPhase > 2.0 * M_PI)
             gPhase -= 2.0 * M_PI;
 
-        for(unsigned int channel = 0; channel < context->audioChannels; channel++)
-            context->audioOut[n * context->audioChannels + channel] = out;
-
-        if(gCount == 0){
-            BeagleRT_scheduleAuxiliaryTask(transmitReceiveDataTask);
-        }
+        networkSend.log(out);
+        float in;
+        int ret = receive.getSamplesSrc(&in, 1, 1);
+        for(unsigned int channel = 0; channel < context->audioChannels; channel++){
+            audioWriteFrame(context, n, channel, in);
+        }
-        gCount++;
-    }
-
-
-*/
-    for(int n = 0; n < context->audioFrames; n++) {
-        float out = 0.7f * sinf(gPhase);
-        gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
-        if(gPhase > 2.0 * M_PI)
-            gPhase -= 2.0 * M_PI;
-
-//      for(int channel = 0; channel < context->audioChannels; channel++)
-//          context->audioOut[n * context->audioChannels + channel] = context->audioIn[n * context->audioChannels + 0]+context->audioIn[n * context->audioChannels + 1];
-        context->audioOut[n * context->audioChannels] = context->audioIn[n*context->audioChannels+0];
-        context->audioOut[n * context->audioChannels+1]=out;
-        if(0==gCount){
-//          scheduleAuxiliaryTask(transmitReceiveDataTask);
-        }
-        for(int j=0; j<numNetAudio; j++){
-            if(netAudio[j].index==(NETWORK_AUDIO_BUFFER_SIZE)){ // when the buffer is ready ...
-                netAudio[j].toBeSent=true;
-                netAudio[j].index=0; //reset the counter
-                if(netAudio[j].doneOnTime==0)
-                    rt_printf("Network buffer underrun :-{\n");
-                netAudio[j].timestamp=gCount;
-                netAudio[j].currentBuffer=!netAudio[j].currentBuffer; //switch buffer
-                netAudio[j].doneOnTime=0;
-                BeagleRT_scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer
-            }
-        }
-        if((gCount&1)==0){
-            netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=analogReadFrame(context,n/2,0)+context->audioOut[n*context->audioChannels + 0];
-            netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=analogReadFrame(context,n/2,1)+context->audioOut[n*context->audioChannels + 0];
-        }
-        netAudio[0].buffers[netAudio[0].currentBuffer][netAudio[0].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);//copy channel 0 to the buffer
-//      netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
-//      netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=0.5*(out+context->audioOut[n*context->audioChannels + 0]);
-        gCount++;
-    }
     }
 }
 
 // cleanup() is called once at the end, after the audio has stopped.
 // Release any resources that were allocated in setup().
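
The code removed by this changeset streamed audio by hand: render() copied samples into one half of a double buffer per stream and, once that half was full, flipped buffers and scheduled an auxiliary task that performed the actual UDP send at lower priority, since socket I/O is not safe inside the real-time audio callback. Below is a minimal sketch of that pattern, reduced to a single stream; it is not the project's code, and it uses only the calls visible in the left-hand revision above (UdpClient::setPort/setServer/send, BeagleRT_createAuxiliaryTask, BeagleRT_scheduleAuxiliaryTask). The buffer size of 300 floats is an arbitrary choice that keeps each datagram under the ~1472-byte UDP payload limit mentioned in the removed comment.

// Sketch of the retired pattern: double-buffered capture in render(),
// UDP transmission deferred to an auxiliary task.
#include <BeagleRT.h>
#include <UdpClient.h>

#define NET_BUFFER_SIZE 300              // floats per datagram (arbitrary, < 1472 bytes)

UdpClient udpClient;                     // UDP socket wrapper used by the old code
AuxiliaryTask sendTask;
float gNetBuffers[2][NET_BUFFER_SIZE];   // one half is filled while the other is sent
int gCurrentBuffer = 0;                  // half currently being filled
int gWriteIndex = 0;                     // write position inside that half

void sendBuffer()                        // runs outside the audio thread
{
    // transmit the half that render() has just finished filling
    udpClient.send(gNetBuffers[!gCurrentBuffer], NET_BUFFER_SIZE * sizeof(float));
}

bool setup(BeagleRTContext *context, void *userData)
{
    udpClient.setPort(9999);
    udpClient.setServer("192.168.7.1");
    sendTask = BeagleRT_createAuxiliaryTask(sendBuffer, 98, "send-audio");
    return true;
}

void render(BeagleRTContext *context, void *userData)
{
    for(unsigned int n = 0; n < context->audioFrames; n++) {
        gNetBuffers[gCurrentBuffer][gWriteIndex++] = context->audioIn[n * context->audioChannels];
        if(gWriteIndex == NET_BUFFER_SIZE) {           // this half is full:
            gWriteIndex = 0;
            gCurrentBuffer = !gCurrentBuffer;          // keep filling the other half...
            BeagleRT_scheduleAuxiliaryTask(sendTask);  // ...and send the finished one
        }
    }
}

void cleanup(BeagleRTContext *context, void *userData) {}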
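
After this change the same buffering and thread management live inside the NetworkSend and ReceiveAudioThread classes, and the example reduces to the calls below. This is only the right-hand revision condensed into one listing for readability; the argument lists of setup(), init() and getSamplesSrc() are reproduced as they appear in this changeset rather than checked against the class headers, and the added comments are interpretation, not documentation.

#include <BeagleRT.h>
#include <NetworkSend.h>
#include <ReceiveAudioThread.h>
#include <Utilities.h>
#include <cmath>

NetworkSend networkSend;        // sends one channel of audio over UDP
ReceiveAudioThread receive;     // background thread that buffers incoming UDP audio

float gFrequency;               // set from userData, as in the project
float gInverseSampleRate;
float gPhase;

bool setup(BeagleRTContext *context, void *userData)
{
    gFrequency = *(float *)userData;
    // sample rate, block size, channel 0, destination port 9999, host PC at 192.168.7.1
    networkSend.setup(context->audioSampleRate, context->audioFrames, 0, 9999, "192.168.7.1");
    receive.init(10000, context->audioFrames, 0);   // listen on port 10000
    receive.startThread();
    gInverseSampleRate = 1.0 / context->audioSampleRate;
    gPhase = 0;
    return true;
}

void render(BeagleRTContext *context, void *userData)
{
    for(unsigned int n = 0; n < context->audioFrames; n++) {
        float out = 0.7f * sinf(gPhase);                      // local sine oscillator
        gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
        if(gPhase > 2.0 * M_PI)
            gPhase -= 2.0 * M_PI;

        networkSend.log(out);                 // hand the generated sample to the sender
        float in = 0;
        receive.getSamplesSrc(&in, 1, 1);     // pull one sample from the receive buffer
        for(unsigned int channel = 0; channel < context->audioChannels; channel++)
            audioWriteFrame(context, n, channel, in);         // play back what was received
    }
}

void cleanup(BeagleRTContext *context, void *userData) {}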