beaglert: comparison of projects/basic_network/render.cpp @ 54:d3f869b98147 (branch: ultra-staging)
Udp transmission working (enough) for demo in Nottingham
author | Giulio Moro <giuliomoro@yahoo.it> |
date | Mon, 08 Jun 2015 01:07:48 +0100 |
parents | 4255ecbb9bec |
children | 3068421c0737 |
53:6907e2177eb8 | 54:d3f869b98147 |
---|---|
6 */ | 6 */ |
7 | 7 |
8 #include "../../include/RTAudioSettings.h" | 8 #include "../../include/RTAudioSettings.h" |
9 #include "../../include/render.h" | 9 #include "../../include/render.h" |
10 #include <cmath> | 10 #include <cmath> |
11 #include "../../include/client.h" | 11 //#include "../../include/client.h" |
12 #include "../../include/RTAudio.h" // to schedule lower prio parallel process | 12 #include "../../include/RTAudio.h" // to schedule lower prio parallel process |
13 #include <rtdk.h> | 13 #include <rtdk.h> |
14 #include <native/timer.h> | 14 #include <native/timer.h> |
15 #include "../../include/PRU.h" | 15 #include "../../include/PRU.h" |
16 #include "../../include/UdpClient.h" | |
17 | |
18 #define NETWORK_AUDIO_BUFFER_SIZE 400 //1400/4 //maximum payload for a UDP datagram over ethernet is 1472 bytes, I leave some headroom and divide by 4 to get the number of floats | |
19 struct networkAudio{ | |
20 int timestamp; | |
21 int currentBuffer; | |
22 int index; | |
23 float buffers[2][NETWORK_AUDIO_BUFFER_SIZE]; | |
24 int doneOnTime; | |
25 bool toBeSent; | |
26 UdpClient udpClient; | |
27 }; | |
28 | |
16 float gFrequency; | 29 float gFrequency; |
17 float gPhase; | 30 float gPhase; |
18 float gInverseSampleRate; | 31 float gInverseSampleRate; |
19 int gCount=0; | 32 int gCount=0; |
20 networkData networkObject; | 33 //networkData networkObject; |
21 networkAudio netAudio; | 34 #define numNetAudio 3 |
| 35 networkAudio netAudio[numNetAudio]; |
22 extern PRU *gPRU; | 36 extern PRU *gPRU; |
23 AuxiliaryTask printIntervalTask; | 37 AuxiliaryTask printIntervalTask; |
24 AuxiliaryTask transmitReceiveAudioTask; | 38 AuxiliaryTask transmitReceiveAudioTask; |
25 void transmitReceiveData(){ //transmit and receive asynchronous messages | 39 void transmitReceiveData(){ //transmit and receive asynchronous messages |
26 // printf("transmitReceiveData auxiliary task has started\n"); | 40 // printf("transmitReceiveData auxiliary task has started\n"); |
31 // } | 45 // } |
32 // closeSockets(); | 46 // closeSockets(); |
33 } | 47 } |
34 | 48 |
35 void transmitReceiveAudio(){ //transmit and receive audio buffers | 49 void transmitReceiveAudio(){ //transmit and receive audio buffers |
36 sendAudio(&netAudio); | 50 for(int n=0;n<numNetAudio; n++){ |
| 51 if(netAudio[n].toBeSent){ |
| 52 netAudio[n].toBeSent=false; |
| 53 netAudio[n].udpClient.send(netAudio[n].buffers[!netAudio[n].currentBuffer],NETWORK_AUDIO_BUFFER_SIZE*sizeof(float)); |
| 54 netAudio[n].doneOnTime=1; |
| 55 } |
| 56 } |
37 } | 57 } |
38 | 58 |
39 | 59 |
40 // initialise_render() is called once before the audio rendering starts. | 60 // initialise_render() is called once before the audio rendering starts. |
41 // Use it to perform any initialisation and allocation which is dependent | 61 // Use it to perform any initialisation and allocation which is dependent |
59 | 79 |
60 // networkObject.counter=&gCount; | 80 // networkObject.counter=&gCount; |
61 // networkObject.variables[0]=&gFrequency; | 81 // networkObject.variables[0]=&gFrequency; |
62 // networkObject.variables[1]=&gPhase; | 82 // networkObject.variables[1]=&gPhase; |
63 // networkObject.numVariables=2; | 83 // networkObject.numVariables=2; |
64 // netAudio.doneOnTime=1; | 84 for(int n=0; n<numNetAudio; n++){ |
65 // netAudio.index=0; | 85 netAudio[n].doneOnTime=1; |
66 // netAudio.currentBuffer=0; | 86 netAudio[n].index=0; |
| 87 netAudio[n].currentBuffer=0; |
| 88 netAudio[n].toBeSent=false; |
| 89 netAudio[n].udpClient.setPort(settings->transmitPort+n); |
| 90 netAudio[n].udpClient.setServer(settings->serverName); |
| 91 } |
67 // setupSockets(settings->receivePort, settings->transmitPort, settings->serverName); | 92 // setupSockets(settings->receivePort, settings->transmitPort, settings->serverName); |
68 //// transmitReceiveDataTask=createAuxiliaryTaskLoop(*transmitReceiveData, 10, "transmit-receive-data"); | 93 |
69 //// scheduleAuxiliaryTask(transmitReceiveDataTask); //here it does not work | 94 // transmitReceiveDataTask=createAuxiliaryTaskLoop(*transmitReceiveData, 10, "transmit-receive-data"); |
70 // transmitReceiveAudioTask=createAuxiliaryTaskLoop(*transmitReceiveAudio, 98, "transmit-receive-audio"); | 95 // scheduleAuxiliaryTask(transmitReceiveDataTask); //here it does not work |
| 96 transmitReceiveAudioTask=createAuxiliaryTaskLoop(*transmitReceiveAudio, 98, "transmit-receive-audio"); |
71 return true; | 97 return true; |
72 } | 98 } |
73 | 99 |
74 // render() is called regularly at the highest priority by the audio engine. | 100 // render() is called regularly at the highest priority by the audio engine. |
75 // Input and output are given from the audio hardware and the other | 101 // Input and output are given from the audio hardware and the other |
79 void render(int numAnalogFrames, int numAudioFrames, int numDigitalFrames, float *audioIn, float *audioOut, | 105 void render(int numAnalogFrames, int numAudioFrames, int numDigitalFrames, float *audioIn, float *audioOut, |
80 float *analogIn, float *analogOut, uint32_t *digital) | 106 float *analogIn, float *analogOut, uint32_t *digital) |
81 { | 107 { |
82 for(int n = 0; n < numAudioFrames; n++) { | 108 for(int n = 0; n < numAudioFrames; n++) { |
83 float out = 0.7f * sinf(gPhase); | 109 float out = 0.7f * sinf(gPhase); |
84 float fake=0.1; | |
85 for(int a=0; a<24; a++){ | |
86 fake = 0.7f * sinf(fake+out); | |
87 } | |
88 fake/=1000000000000000; | |
89 gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate; | 110 gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate; |
90 if(gPhase > 2.0 * M_PI) | 111 if(gPhase > 2.0 * M_PI) |
91 gPhase -= 2.0 * M_PI; | 112 gPhase -= 2.0 * M_PI; |
92 | 113 |
93 // for(int channel = 0; channel < gNumAudioChannels; channel++) | 114 // for(int channel = 0; channel < gNumAudioChannels; channel++) |
94 // audioOut[n * gNumAudioChannels + channel] = audioIn[n * gNumAudioChannels + 0]+audioIn[n * gNumAudioChannels + 1]; | 115 // audioOut[n * gNumAudioChannels + channel] = audioIn[n * gNumAudioChannels + 0]+audioIn[n * gNumAudioChannels + 1]; |
95 audioOut[n * gNumAudioChannels] = fake*0.0000000001; | 116 audioOut[n * gNumAudioChannels] = audioIn[n*gNumAudioChannels+0]; |
96 audioOut[n * gNumAudioChannels+1]=out; | 117 audioOut[n * gNumAudioChannels+1]=out; |
97 if(0==gCount){ | 118 if(0==gCount){ |
98 // scheduleAuxiliaryTask(transmitReceiveDataTask); | 119 // scheduleAuxiliaryTask(transmitReceiveDataTask); |
99 } | 120 } |
100 // if(netAudio.index==(NETWORK_AUDIO_BUFFER_SIZE)){ // when the buffer is ready ... | 121 for(int j=0; j<numNetAudio; j++){ |
101 // netAudio.index=0; //reset the counter | 122 if(netAudio[j].index==(NETWORK_AUDIO_BUFFER_SIZE)){ // when the buffer is ready ... |
102 // if(netAudio.doneOnTime==0) | 123 netAudio[j].toBeSent=true; |
103 // rt_printf("Network buffer underrun :-{\n"); | 124 netAudio[j].index=0; //reset the counter |
104 // netAudio.timestamp=gCount; | 125 if(netAudio[j].doneOnTime==0) |
105 // netAudio.currentBuffer=!netAudio.currentBuffer; //switch buffer | 126 rt_printf("Network buffer underrun :-{\n"); |
106 // netAudio.doneOnTime=0; | 127 netAudio[j].timestamp=gCount; |
107 // scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer | 128 netAudio[j].currentBuffer=!netAudio[j].currentBuffer; //switch buffer |
108 // } | 129 netAudio[j].doneOnTime=0; |
109 // netAudio.buffers[netAudio.currentBuffer][netAudio.index++]=audioOut[n*gNumAudioChannels + 0];//copy channel 0 to the buffer | 130 scheduleAuxiliaryTask(transmitReceiveAudioTask); //send the buffer |
| 131 } |
| 132 } |
| 133 if((gCount&1)==0){ |
| 134 netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=analogRead(0,n/2)+audioOut[n*gNumAudioChannels + 0]; |
| 135 netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=analogRead(1,n/2)+audioOut[n*gNumAudioChannels + 0]; |
| 136 } |
| 137 netAudio[0].buffers[netAudio[0].currentBuffer][netAudio[0].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]);//copy channel 0 to the buffer |
| 138 // netAudio[1].buffers[netAudio[1].currentBuffer][netAudio[1].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]); |
| 139 // netAudio[2].buffers[netAudio[2].currentBuffer][netAudio[2].index++]=0.5*(out+audioOut[n*gNumAudioChannels + 0]); |
110 gCount++; | 140 gCount++; |
111 } | 141 } |
112 } | 142 } |
113 | 143 |
114 // cleanup_render() is called once at the end, after the audio has stopped. | 144 // cleanup_render() is called once at the end, after the audio has stopped. |
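
A note on the NETWORK_AUDIO_BUFFER_SIZE definition added above (new line 18): the comment's figure comes from the 1472-byte maximum UDP payload on a standard 1500-byte Ethernet MTU (1500 minus 20 bytes of IPv4 header and 8 bytes of UDP header). The following standalone check of that arithmetic is illustrative only, not part of the project: with 400 floats the datagram is 1600 bytes and gets fragmented at the IP layer, whereas the commented-out 1400/4 (350 floats, 1400 bytes) stays under the limit.

```cpp
// Standalone sanity check of the datagram size implied by NETWORK_AUDIO_BUFFER_SIZE.
// Illustrative only; the constants mirror the comment in the changeset.
#include <cstdio>

int main() {
    const int maxUdpPayload = 1500 - 20 - 8;        // Ethernet MTU minus IPv4 and UDP headers = 1472 bytes
    const int bufferFloats  = 400;                  // NETWORK_AUDIO_BUFFER_SIZE in this changeset
    const int datagramBytes = bufferFloats * (int)sizeof(float);
    std::printf("datagram %d bytes, limit %d bytes -> %s\n",
                datagramBytes, maxUdpPayload,
                datagramBytes <= maxUdpPayload ? "fits in one frame" : "will be IP-fragmented");
    return 0;
}
```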
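
The heart of the changeset is a per-stream double-buffering handshake: render() fills buffers[currentBuffer] one sample at a time; when the buffer is full it flips currentBuffer, raises toBeSent and schedules the lower-priority transmitReceiveAudio task, which sends buffers[!currentBuffer] with UdpClient::send() and sets doneOnTime; if the next buffer fills before that happens, render() prints a network buffer underrun. Below is a minimal, self-contained sketch of that handshake in plain C++11. The std::thread, sendDatagram() and the timing constants are hypothetical stand-ins for the BeagleRT auxiliary task and UdpClient::send(); only the struct fields and the fill/swap/flag logic follow the diff.

```cpp
// Minimal sketch of the double-buffering handshake introduced here, in plain C++11.
// sendDatagram(), senderLoop() and the timing constants are illustrative stand-ins,
// not BeagleRT or UdpClient API; only the struct fields and the fill/swap/flag
// logic follow the changeset.
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <thread>

#define NETWORK_AUDIO_BUFFER_SIZE 400

struct NetworkAudio {
    int timestamp = 0;
    int currentBuffer = 0;              // buffer currently being filled by the audio loop
    int index = 0;                      // write position inside the current buffer
    float buffers[2][NETWORK_AUDIO_BUFFER_SIZE] = {};
    std::atomic<int> doneOnTime{1};     // cleared when a send is requested, set when it completes
    std::atomic<bool> toBeSent{false};  // handshake flag between audio loop and sender
};

// Stand-in for UdpClient::send(): just reports what would go on the wire.
static void sendDatagram(const float* data, std::size_t bytes) {
    std::printf("would send %zu bytes (first sample %f)\n", bytes, data[0]);
}

// Stand-in for the transmitReceiveAudio() auxiliary task.
static void senderLoop(NetworkAudio& na, std::atomic<bool>& running) {
    while (running) {
        if (na.toBeSent.exchange(false)) {
            sendDatagram(na.buffers[!na.currentBuffer],
                         NETWORK_AUDIO_BUFFER_SIZE * sizeof(float));
            na.doneOnTime = 1;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}

int main() {
    NetworkAudio na;
    std::atomic<bool> running{true};
    std::thread sender(senderLoop, std::ref(na), std::ref(running));

    // Stand-in for the per-frame logic in render(): fill, and hand over when full.
    for (int n = 0; n < 5 * NETWORK_AUDIO_BUFFER_SIZE; ++n) {
        if (na.index == NETWORK_AUDIO_BUFFER_SIZE) {          // when the buffer is ready...
            if (na.doneOnTime == 0)
                std::printf("Network buffer underrun :-{\n"); // previous send did not finish in time
            na.timestamp = n;
            na.currentBuffer = !na.currentBuffer;             // switch buffer
            na.index = 0;
            na.doneOnTime = 0;
            na.toBeSent = true;                               // ask the sender to ship the other buffer
        }
        na.buffers[na.currentBuffer][na.index++] = std::sin(0.01f * n);
        std::this_thread::sleep_for(std::chrono::microseconds(20)); // fake audio rate
    }

    running = false;
    sender.join();
    return 0;
}
```

The sketch raises toBeSent only after flipping currentBuffer, so the sender always picks up the buffer that has just been completed; in the changeset the auxiliary task is scheduled at the end of the same block, which serves the same purpose.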