beaglert: examples/basic_libpd/render.cpp @ changeset 345:0e1e0dfe24c5 (prerelease)

Added embryonal support for digital messages with libpd.
TODO: test it, only process used channels, add defines. It is quite CPU-intensive.

author:   Giulio Moro <giuliomoro@yahoo.it>
date:     Tue, 07 Jun 2016 03:40:01 +0100
parents:  4823ee13bcac
children: db1e024858b0
/*
 * render.cpp
 *
 *  Created on: Oct 24, 2014
 *      Author: parallels
 */

#include <Bela.h>
#include <DigitalToMessage.h>
#include <cmath>
#include <Utilities.h>
#include <I2c_Codec.h>
#include <PRU.h>
#include <stdio.h>
#include "z_libpd.h"
#include "z_queued.h"
#include "s_stuff.h"
#include <UdpServer.h>
#include <Midi.h>

//extern t_sample* sys_soundin;
//extern t_sample* sys_soundout;
// if you are 100% sure of what value was used to compile libpd/puredata, then
// you could #define this instead of getting it at runtime. It has proved to give some 0.3%
// performance boost when it is 8 (thanks to vectorize optimizations I guess).
int gBufLength;

float* gInBuf;
float* gOutBuf;

void pdnoteon(int ch, int pitch, int vel) {
    printf("noteon: %d %d %d\n", ch, pitch, vel);
}

void Bela_printHook(const char *recv){
    rt_printf("%s", recv);
}

void libpdReadFilesLoop(){
    while(!gShouldStop){
        // check for modified sockets/file descriptors
        // (libpd would normally do this every block WITHIN the audio thread)
        // not sure if this is thread-safe at the moment
        libpd_sys_microsleep(0);
        usleep(1000);
    }
}

#define PARSE_MIDI

AuxiliaryTask libpdReadFilesTask;
AuxiliaryTask libpdProcessMessageQueueTask;
AuxiliaryTask libpdProcessMidiQueueTask;
Midi midi;
//UdpServer udpServer;

void sendDigitalMessage(bool state, unsigned int delay, void* receiverName){
    libpd_float((char*)receiverName, (float)state);
    // rt_printf("%s: %d\n", (char*)receiverName, state);
}

char receiverNames[16][21]={
    {"bela_digitalIn11"},{"bela_digitalIn12"},{"bela_digitalIn13"},{"bela_digitalIn14"},{"bela_digitalIn15"},
    {"bela_digitalIn16"},{"bela_digitalIn17"},{"bela_digitalIn18"},{"bela_digitalIn19"},{"bela_digitalIn20"},
    {"bela_digitalIn21"},{"bela_digitalIn22"},{"bela_digitalIn23"},{"bela_digitalIn24"},{"bela_digitalIn25"},
    {"bela_digitalIn26"}
};

static DigitalToMessage** dtm;
static unsigned int analogChannelsInUse;
static unsigned int gLibpdBlockSize;
static unsigned int gChannelsInUse = 26;

bool setup(BelaContext *context, void *userData)
{
    analogChannelsInUse = min(context->analogChannels, gChannelsInUse - context->audioChannels - context->digitalChannels);
    dtm = new DigitalToMessage* [context->digitalChannels];
    if(context->digitalChannels > 0){
        for(unsigned int ch = 0; ch < context->digitalChannels; ++ch){
            dtm[ch] = new DigitalToMessage;
            dtm[ch]->setCallback(sendDigitalMessage, receiverNames[ch]);
            pinMode(context, 0, ch, OUTPUT);
        }
    }
    midi.readFrom(0);
    midi.writeTo(0);
#ifdef PARSE_MIDI
    midi.enableParser(true);
#else
    midi.enableParser(false);
#endif /* PARSE_MIDI */
    // gChannelsInUse = min((int)(context->analogChannels+context->audioChannels), (int)gChannelsInUse);
    // udpServer.bindToPort(1234);

    gLibpdBlockSize = libpd_blocksize();
    // check that we are not running with a blocksize smaller than gLibPdBlockSize
    // it would still work, but the load would be executed unevenly between calls to render
    if(context->audioFrames < gLibpdBlockSize){
        fprintf(stderr, "Error: minimum block size must be %d\n", gLibpdBlockSize);
        return false;
    }

    // init pd
    libpd_set_queued_printhook(Bela_printHook); // set this before calling libpd_init
    libpd_set_queued_noteonhook(pdnoteon);
    //TODO: add hooks for other midi events and generate MIDI output appropriately
    libpd_queued_init();
    //TODO: ideally, we would analyse the ASCII of the patch file and find the in/outs to use
    libpd_init_audio(gChannelsInUse, gChannelsInUse, context->audioSampleRate);

    libpd_start_message(1); // one entry in list
    libpd_add_float(1.0f);
    libpd_finish_message("pd", "dsp");

    gBufLength = max(gLibpdBlockSize, context->audioFrames);

    char file[] = "_main.pd";
    char folder[] = "./";
    // open patch [; pd open file folder(
    libpd_openfile(file, folder);
    gInBuf = libpd_get_sys_soundin();
    gOutBuf = libpd_get_sys_soundout();

    libpdReadFilesTask = Bela_createAuxiliaryTask(libpdReadFilesLoop, 60, "libpdReadFiles");
    Bela_scheduleAuxiliaryTask(libpdReadFilesTask);

    // Higher priority for the midi queue and lower priority for the message queue. Adjust to taste
    libpdProcessMidiQueueTask = Bela_createAuxiliaryTask(libpd_queued_receive_midi_messages, 80, "libpdProcessMidiQueue");
    libpdProcessMessageQueueTask = Bela_createAuxiliaryTask(libpd_queued_receive_pd_messages, 70, "libpdProcessMessageQueue");

    return true;
}
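
// Note on the buffer/channel layout (inferred from the copy loops in render() and
// from receiverNames above; the exact channel numbers assume the default Bela
// configuration of 2 audio, 8 analog and 16 digital channels):
// gInBuf/gOutBuf point to libpd's non-interleaved sys_soundin/sys_soundout buffers,
// i.e. gChannelsInUse consecutive chunks of gLibpdBlockSize samples, ordered as
// audio channels, then analog channels, then digital channels. On the Pd side this
// means audio on [adc~]/[dac~] channels 1-2 and analogs on 3-10; digital *outputs*
// are read from [dac~] channels 11-26, while digital *inputs* arrive as messages at
// [receive bela_digitalIn11] ... [receive bela_digitalIn26].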

// render() is called regularly at the highest priority by the audio engine.
// Input and output are given from the audio hardware and the other
// ADCs and DACs (if available). If only audio is available, then
// context->analogFrames will be 0.
void render(BelaContext *context, void *userData)
{
    int num;
    // the safest thread-safe option to handle MIDI input is to process the MIDI buffer
    // from the audio thread.
#ifdef PARSE_MIDI
    while((num = midi.getParser()->numAvailableMessages()) > 0){
        static MidiChannelMessage message;
        message = midi.getParser()->getNextChannelMessage();
        //message.prettyPrint(); // use this to print beautified message (channel, data bytes)
        switch(message.getType()){
        case kmmNoteOn:
        {
            int noteNumber = message.getDataByte(0);
            int velocity = message.getDataByte(1);
            int channel = message.getChannel();
            libpd_noteon(channel, noteNumber, velocity);
            break;
        }
        case kmmNoteOff:
        {
            /* PureData does not seem to handle noteoff messages as per the MIDI specs,
             * so that the noteoff velocity is ignored. Here we convert them to noteon
             * with a velocity of 0.
             */
            int noteNumber = message.getDataByte(0);
            // int velocity = message.getDataByte(1); // would be ignored by Pd
            int channel = message.getChannel();
            libpd_noteon(channel, noteNumber, 0);
            break;
        }
        case kmmControlChange:
        {
            int channel = message.getChannel();
            int controller = message.getDataByte(0);
            int value = message.getDataByte(1);
            libpd_controlchange(channel, controller, value);
            break;
        }
        case kmmProgramChange:
        {
            int channel = message.getChannel();
            int program = message.getDataByte(0);
            libpd_programchange(channel, program);
            break;
        }
        case kmmPolyphonicKeyPressure:
        {
            int channel = message.getChannel();
            int pitch = message.getDataByte(0);
            int value = message.getDataByte(1);
            libpd_polyaftertouch(channel, pitch, value);
            break;
        }
        case kmmChannelPressure:
        {
            int channel = message.getChannel();
            int value = message.getDataByte(0);
            libpd_aftertouch(channel, value);
            break;
        }
        case kmmPitchBend:
        {
            int channel = message.getChannel();
            int value = (message.getDataByte(1) << 7) | message.getDataByte(0);
            libpd_pitchbend(channel, value);
            break;
        }
        case kmmNone:
        case kmmAny:
            break;
        }
    }
#else
    int input;
    while((input = midi.getInput()) >= 0){
        libpd_midibyte(0, input);
    }
#endif /* PARSE_MIDI */

    /*
     * NOTE: if you are only using audio (or only analogs) and you are using interleaved buffers
     * and the blocksize of Bela is the same as gLibpdBlockSize, then you probably
     * do not need the for loops before and after libpd_process_float, so you can save quite some
     * memory operations.
     */
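    /* As a rough illustration of that shortcut (a sketch only, assuming an
     * audio-only, interleaved setup where context->audioFrames is a multiple of
     * gLibpdBlockSize and libpd was initialised with context->audioChannels
     * channels in and out), one could call libpd on Bela's buffers directly:
     *
     *     libpd_process_float(context->audioFrames / gLibpdBlockSize,
     *                         context->audioIn, context->audioOut);
     *
     * since libpd_process_float() expects interleaved buffers.
     */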
    static unsigned int numberOfPdBlocksToProcess = gBufLength / gLibpdBlockSize;
    for(unsigned int tick = 0; tick < numberOfPdBlocksToProcess; ++tick){
        unsigned int audioFrameBase = gLibpdBlockSize * tick;
        unsigned int j;
        unsigned int k;
        float* p0;
        float* p1;
        for (j = 0, p0 = gInBuf; j < gLibpdBlockSize; j++, p0++) {
            for (k = 0, p1 = p0; k < context->audioChannels; k++, p1 += gLibpdBlockSize) {
                *p1 = audioRead(context, audioFrameBase + j, k);
            }
        }
        // then analogs
        // this loop resamples by zero-order hold (ZOH), as needed
        if(context->analogChannels == 8){ //hold the value for two frames
            for (j = 0, p0 = gInBuf; j < gLibpdBlockSize; j++, p0++) {
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    unsigned int analogFrame = (audioFrameBase + j) / 2;
                    *p1 = analogRead(context, analogFrame, k);
                }
            }
        } else if(context->analogChannels == 4){ //write every frame
            for (j = 0, p0 = gInBuf; j < gLibpdBlockSize; j++, p0++) {
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    unsigned int analogFrame = audioFrameBase + j;
                    *p1 = analogRead(context, analogFrame, k);
                }
            }
        } else if(context->analogChannels == 2){ //drop every other frame
            for (j = 0, p0 = gInBuf; j < gLibpdBlockSize; j++, p0++) {
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    unsigned int analogFrame = (audioFrameBase + j) * 2;
                    *p1 = analogRead(context, analogFrame, k);
                }
            }
        }

        //then digital
        //TODO: in multiple places we assume that the number of digitals is same as number of audio
        for(unsigned int n = 0; n < context->digitalChannels; ++n){
            // TODO: note that we consider only the first sample of the block
            // considering all of them is notably more expensive
            // TODO: only process the channels marked as such
            dtm[n]->process(n + 16, &context->digital[audioFrameBase], 1);
        }

        libpd_process_sys(); // process the block

        //digital
        for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; ++j, ++p0) {
            unsigned int digitalFrame = (audioFrameBase + j);
            for (k = 0, p1 = p0 + gLibpdBlockSize * (context->audioChannels + 8); k < context->digitalChannels; k++, p1 += gLibpdBlockSize) {
                // TODO: only process the channels marked as such
                digitalWriteOnce(context, digitalFrame, k, *p1 > 0.5);
            }
        }
        //audio
        for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; j++, p0++) {
            for (k = 0, p1 = p0; k < context->audioChannels; k++, p1 += gLibpdBlockSize) {
                audioWrite(context, audioFrameBase + j, k, *p1);
            }
        }
        //analog
        if(context->analogChannels == 8){
            for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; j += 2, p0 += 2) { //write every two frames
                unsigned int analogFrame = (audioFrameBase + j) / 2;
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    analogWriteOnce(context, analogFrame, k, *p1);
                }
            }
        } else if(context->analogChannels == 4){ //write every frame
            for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; ++j, ++p0) {
                unsigned int analogFrame = (audioFrameBase + j);
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    analogWriteOnce(context, analogFrame, k, *p1);
                }
            }
        } else if(context->analogChannels == 2){ //write every frame twice
            for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; j++, p0++) {
                for (k = 0, p1 = p0 + gLibpdBlockSize * context->audioChannels; k < analogChannelsInUse; k++, p1 += gLibpdBlockSize) {
                    unsigned int analogFrame = audioFrameBase * 2 + j * 2;
                    analogWriteOnce(context, analogFrame, k, *p1);
                    analogWriteOnce(context, analogFrame + 1, k, *p1);
                }
            }
        }
    }
    Bela_scheduleAuxiliaryTask(libpdProcessMidiQueueTask);
    Bela_scheduleAuxiliaryTask(libpdProcessMessageQueueTask);
}
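
// Regarding the "add hooks for other midi events" TODO in setup(): libpd's
// z_queued.h also provides setters such as libpd_set_queued_controlchangehook()
// and libpd_set_queued_pitchbendhook(). A rough, untested sketch (the callback
// name below is hypothetical and nothing is forwarded to the MIDI hardware
// output yet):
//
//     void pdcontrolchange(int channel, int controller, int value){
//         rt_printf("controlchange: %d %d %d\n", channel, controller, value);
//     }
//     // in setup(), next to libpd_set_queued_noteonhook(pdnoteon):
//     libpd_set_queued_controlchangehook(pdcontrolchange);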

// cleanup() is called once at the end, after the audio has stopped.
// Release any resources that were allocated in setup().
void cleanup(BelaContext *context, void *userData)
{
    libpd_queued_release();
    delete[] dtm;
}