# HG changeset patch
# User Chris Cannam
# Date 1426006972 0
# Node ID 72c662fe7ea311077a1a163dd3afa32581587805
# Parent  618d5816b04df9eea992a67eb092cb98288646b2
Further dedicated-types fixes

diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioCallbackPlaySource.cpp
--- a/audioio/AudioCallbackPlaySource.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioCallbackPlaySource.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -306,7 +306,7 @@
         m_sourceSampleRate = 0;
     }
 
-    int lastEnd = 0;
+    sv_frame_t lastEnd = 0;
     for (std::set<Model *>::const_iterator i = m_models.begin();
          i != m_models.end(); ++i) {
 #ifdef DEBUG_AUDIO_PLAY_SOURCE
@@ -367,7 +367,7 @@
     rebuildRangeLists();
 
     if (count == 0) {
-        if (m_writeBuffers) count = m_writeBuffers->size();
+        if (m_writeBuffers) count = int(m_writeBuffers->size());
     }
 
     cerr << "current playing frame = " << getCurrentPlayingFrame() << endl;
@@ -593,7 +593,7 @@
 AudioCallbackPlaySource::getTargetBlockSize() const
 {
 //    cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl;
-    return m_blockSize;
+    return int(m_blockSize);
 }
 
 void
@@ -614,8 +614,8 @@
     // This method attempts to estimate which audio sample frame is
     // "currently coming through the speakers".
 
-    int targetRate = getTargetSampleRate();
-    int latency = m_playLatency; // at target rate
+    sv_samplerate_t targetRate = getTargetSampleRate();
+    sv_frame_t latency = m_playLatency; // at target rate
     RealTime latency_t = RealTime::zeroTime;
 
     if (targetRate != 0) {
@@ -655,8 +655,8 @@
         }
     }
 
-    int readBufferFill = m_readBufferFill;
-    int lastRetrievedBlockSize = m_lastRetrievedBlockSize;
+    sv_frame_t readBufferFill = m_readBufferFill;
+    sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize;
     double lastRetrievalTimestamp = m_lastRetrievalTimestamp;
     double currentTime = 0.0;
     if (m_target) currentTime = m_target->getCurrentTime();
@@ -665,7 +665,7 @@
 
     RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate);
 
-    int stretchlat = 0;
+    sv_frame_t stretchlat = 0;
     double timeRatio = 1.0;
 
     if (m_timeStretcher) {
@@ -755,7 +755,9 @@
         ++index;
     }
 
-    if (inRange >= (int)m_rangeStarts.size()) inRange = m_rangeStarts.size()-1;
+    if (inRange >= int(m_rangeStarts.size())) {
+        inRange = int(m_rangeStarts.size())-1;
+    }
 
     RealTime playing_t = bufferedto_t;
@@ -806,7 +808,7 @@
 
             if (inRange == 0) {
                 if (looping) {
-                    inRange = m_rangeStarts.size() - 1;
+                    inRange = int(m_rangeStarts.size()) - 1;
                 } else {
                     break;
                 }
@@ -854,7 +856,7 @@
     m_rangeStarts.clear();
     m_rangeDurations.clear();
 
-    int sourceRate = getSourceSampleRate();
+    sv_samplerate_t sourceRate = getSourceSampleRate();
     if (sourceRate == 0) return;
 
     RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate);
@@ -1060,28 +1062,28 @@
 }
 
 void
-AudioCallbackPlaySource::setTimeStretch(float factor)
+AudioCallbackPlaySource::setTimeStretch(double factor)
 {
     m_stretchRatio = factor;
 
     if (!getTargetSampleRate()) return; // have to make our stretcher later
 
-    if (m_timeStretcher || (factor == 1.f)) {
+    if (m_timeStretcher || (factor == 1.0)) {
         // stretch ratio will be set in next process call if appropriate
     } else {
         m_stretcherInputCount = getTargetChannelCount();
         RubberBandStretcher *stretcher = new RubberBandStretcher
-            (getTargetSampleRate(),
+            (int(getTargetSampleRate()),
             m_stretcherInputCount,
             RubberBandStretcher::OptionProcessRealTime,
             factor);
         RubberBandStretcher *monoStretcher = new RubberBandStretcher
-            (getTargetSampleRate(),
+            (int(getTargetSampleRate()),
             1,
             RubberBandStretcher::OptionProcessRealTime,
             factor);
         m_stretcherInputs = new float *[m_stretcherInputCount];
-        m_stretcherInputSizes = new int[m_stretcherInputCount];
+        m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount];
         for (int c = 0; c < m_stretcherInputCount; ++c) {
             m_stretcherInputSizes[c] = 16384;
             m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]];
@@ -1146,12 +1148,12 @@
     RubberBandStretcher *ts = m_timeStretcher;
     RubberBandStretcher *ms = m_monoStretcher;
 
-    float ratio = ts ? ts->getTimeRatio() : 1.f;
+    double ratio = ts ? ts->getTimeRatio() : 1.0;
 
     if (ratio != m_stretchRatio) {
         if (!ts) {
             cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl;
-            m_stretchRatio = 1.f;
+            m_stretchRatio = 1.0;
         } else {
             ts->setTimeRatio(m_stretchRatio);
             if (ms) ms->setTimeRatio(m_stretchRatio);
@@ -1186,10 +1188,10 @@
             // this is marginally more likely to leave our channels in
             // sync after a processing failure than just passing "count":
-            int request = count;
+            sv_frame_t request = count;
             if (ch > 0) request = got;
 
-            got = rb->read(buffer[ch], request);
+            got = rb->read(buffer[ch], int(request));
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
             cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl;
@@ -1215,20 +1217,20 @@
     }
 
     int channels = getTargetChannelCount();
-    int available;
+    sv_frame_t available;
+    sv_frame_t fedToStretcher = 0;
     int warned = 0;
-    int fedToStretcher = 0;
 
     // The input block for a given output is approx output / ratio,
     // but we can't predict it exactly, for an adaptive timestretcher.
 
     while ((available = ts->available()) < count) {
 
-        int reqd = lrintf((count - available) / ratio);
-        reqd = std::max(reqd, (int)ts->getSamplesRequired());
+        sv_frame_t reqd = lrint(double(count - available) / ratio);
+        reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired()));
         if (reqd == 0) reqd = 1;
 
-        int got = reqd;
+        sv_frame_t got = reqd;
 
 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
         cerr << "reqd = " <getReadSpace() << " to read"
@@ -1759,7 +1761,7 @@
         }
     }
 
-    int rf = m_readBufferFill;
+    sv_frame_t rf = m_readBufferFill;
     RingBuffer<float> *rb = getReadRingBuffer(0);
     if (rb) {
         int rs = rb->getReadSpace();
@@ -1773,8 +1775,8 @@
     SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl;
 #endif
 
-    int wf = m_writeBufferFill;
-    int skip = 0;
+    sv_frame_t wf = m_writeBufferFill;
+    sv_frame_t skip = 0;
     for (int c = 0; c < getTargetChannelCount(); ++c) {
         RingBuffer<float> *wb = getWriteRingBuffer(c);
         if (wb) {
@@ -1792,7 +1794,7 @@
             }
 
//            cout << "skipping " << skip << endl;
 
-            wb->skip(skip);
+            wb->skip(int(skip));
         }
     }
@@ -1835,10 +1837,9 @@
 
     } else {
 
-        float ms = 100;
+        double ms = 100;
         if (s.getSourceSampleRate() > 0) {
-            ms = float(s.m_ringBufferSize) /
-                float(s.getSourceSampleRate()) * 1000.0;
+            ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0;
         }
 
         if (s.m_playing) ms /= 10;
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioCallbackPlaySource.h
--- a/audioio/AudioCallbackPlaySource.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioCallbackPlaySource.h	Tue Mar 10 17:02:52 2015 +0000
@@ -201,7 +201,7 @@
     /**
      * Set the time stretcher factor (i.e. playback speed).
      */
-    void setTimeStretch(float factor);
+    void setTimeStretch(double factor);
 
     /**
      * Set the resampler quality, 0 - 2 where 0 is fastest and 2 is
@@ -244,7 +244,9 @@
 
     void playStatusChanged(bool isPlaying);
 
-    void sampleRateMismatch(int requested, int available, bool willResample);
+    void sampleRateMismatch(sv_samplerate_t requested,
+                            sv_samplerate_t available,
+                            bool willResample);
 
     void audioOverloadPluginDisabled();
     void audioTimeStretchMultiChannelDisabled();
@@ -280,17 +282,17 @@
     std::set<Model *> m_models;
     RingBufferVector *m_readBuffers;
     RingBufferVector *m_writeBuffers;
-    int m_readBufferFill;
-    int m_writeBufferFill;
+    sv_frame_t m_readBufferFill;
+    sv_frame_t m_writeBufferFill;
     Scavenger<RingBufferVector> m_bufferScavenger;
     int m_sourceChannelCount;
-    int m_blockSize;
+    sv_frame_t m_blockSize;
     sv_samplerate_t m_sourceSampleRate;
     sv_samplerate_t m_targetSampleRate;
-    int m_playLatency;
+    sv_frame_t m_playLatency;
     AudioCallbackPlayTarget *m_target;
     double m_lastRetrievalTimestamp;
-    int m_lastRetrievedBlockSize;
+    sv_frame_t m_lastRetrievedBlockSize;
     bool m_trustworthyTimestamps;
     sv_frame_t m_lastCurrentFrame;
     bool m_playing;
@@ -328,12 +330,12 @@
     RubberBand::RubberBandStretcher *m_timeStretcher;
     RubberBand::RubberBandStretcher *m_monoStretcher;
-    float m_stretchRatio;
+    double m_stretchRatio;
     bool m_stretchMono;
 
-    int m_stretcherInputCount;
+    int m_stretcherInputCount;
     float **m_stretcherInputs;
-    int *m_stretcherInputSizes;
+    sv_frame_t *m_stretcherInputSizes;
 
     // Called from fill thread, m_playing true, mutex held
     // Return true if work done
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioGenerator.cpp
--- a/audioio/AudioGenerator.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioGenerator.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -37,7 +37,7 @@
 #include
 #include
 
-const int
+const sv_frame_t
 AudioGenerator::m_processingBlockSize = 1024;
 
 QString
@@ -220,11 +220,11 @@
                                               m_sourceSampleRate,
                                               m_processingBlockSize);
 
-    float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required
+    double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
 
     QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
 
-    float level = wantsQuieterClips(model) ? 0.5 : 1.0;
+    double level = wantsQuieterClips(model) ? 0.5 : 1.0;
     if (!mixer->loadClipData(clipPath, clipF0, level)) {
         delete mixer;
         return 0;
     }
@@ -310,7 +310,7 @@
     }
 }
 
-int
+sv_frame_t
 AudioGenerator::getBlockSize() const
 {
     return m_processingBlockSize;
@@ -334,9 +334,9 @@
     m_soloing = false;
 }
 
-int
-AudioGenerator::mixModel(Model *model, int startFrame, int frameCount,
-                         float **buffer, int fadeIn, int fadeOut)
+sv_frame_t
+AudioGenerator::mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+                         float **buffer, sv_frame_t fadeIn, sv_frame_t fadeOut)
 {
     if (m_sourceSampleRate == 0) {
         cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
@@ -393,13 +393,13 @@
     return frameCount;
 }
 
-int
+sv_frame_t
 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
-                                       int startFrame, int frames,
+                                       sv_frame_t startFrame, sv_frame_t frames,
                                        float **buffer, float gain, float pan,
-                                       int fadeIn, int fadeOut)
+                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
 {
-    int maxFrames = frames + std::max(fadeIn, fadeOut);
+    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);
 
     int modelChannels = dtvm->getChannelCount();
@@ -420,7 +420,7 @@
         m_channelBufSiz = maxFrames;
     }
 
-    int got = 0;
+    sv_frame_t got = 0;
 
     if (startFrame >= fadeIn/2) {
         got = dtvm->getData(0, modelChannels - 1,
@@ -428,7 +428,7 @@
                             frames + fadeOut/2 + fadeIn/2,
                             m_channelBuffer);
     } else {
-        int missing = fadeIn/2 - startFrame;
+        sv_frame_t missing = fadeIn/2 - startFrame;
 
         for (int c = 0; c < modelChannels; ++c) {
             m_channelBuffer[c] += missing;
@@ -462,25 +462,27 @@
         float channelGain = gain;
         if (pan != 0.0) {
             if (c == 0) {
-                if (pan > 0.0) channelGain *= 1.0 - pan;
+                if (pan > 0.0) channelGain *= 1.0f - pan;
             } else {
-                if (pan < 0.0) channelGain *= pan + 1.0;
+                if (pan < 0.0) channelGain *= pan + 1.0f;
             }
         }
 
-        for (int i = 0; i < fadeIn/2; ++i) {
+        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
             float *back = buffer[c];
             back -= fadeIn/2;
-            back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn;
+            back[i] +=
+                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
+                / float(fadeIn);
         }
 
-        for (int i = 0; i < frames + fadeOut/2; ++i) {
+        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
             float mult = channelGain;
             if (i < fadeIn/2) {
-                mult = (mult * i) / fadeIn;
+                mult = (mult * float(i)) / float(fadeIn);
             }
             if (i > frames - fadeOut/2) {
-                mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
+                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
             }
             float val = m_channelBuffer[sourceChannel][i];
             if (i >= got) val = 0.f;
@@ -491,15 +493,15 @@
     return got;
 }
 
-int
+sv_frame_t
 AudioGenerator::mixClipModel(Model *model,
-                             int startFrame, int frames,
+                             sv_frame_t startFrame, sv_frame_t frames,
                              float **buffer, float gain, float pan)
 {
     ClipMixer *clipMixer = m_clipMixerMap[model];
     if (!clipMixer) return 0;
 
-    int blocks = frames / m_processingBlockSize;
+    int blocks = int(frames / m_processingBlockSize);
 
    //!!! todo: the below -- it matters
@@ -513,7 +515,7 @@
     //callback play source has to use that as a multiple for all the
     //calls to mixModel
 
-    int got = blocks * m_processingBlockSize;
+    sv_frame_t got = blocks * m_processingBlockSize;
 
 #ifdef DEBUG_AUDIO_GENERATOR
     cout << "mixModel [clip]: frames " << frames
@@ -529,7 +531,7 @@
 
     for (int i = 0; i < blocks; ++i) {
 
-        int reqStart = startFrame + i * m_processingBlockSize;
+        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
 
         NoteList notes;
         NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
@@ -544,7 +546,7 @@
         for (NoteList::const_iterator ni = notes.begin();
              ni != notes.end(); ++ni) {
 
-            int noteFrame = ni->start;
+            sv_frame_t noteFrame = ni->start;
 
             if (noteFrame < reqStart ||
                 noteFrame >= reqStart + m_processingBlockSize) continue;
@@ -552,7 +554,7 @@
             while (noteOffs.begin() != noteOffs.end() &&
                    noteOffs.begin()->frame <= noteFrame) {
 
-                int eventFrame = noteOffs.begin()->frame;
+                sv_frame_t eventFrame = noteOffs.begin()->frame;
                 if (eventFrame < reqStart) eventFrame = reqStart;
 
                 off.frameOffset = eventFrame - reqStart;
@@ -568,7 +570,7 @@
 
             on.frameOffset = noteFrame - reqStart;
             on.frequency = ni->getFrequency();
-            on.level = float(ni->velocity) / 127.0;
+            on.level = float(ni->velocity) / 127.0f;
             on.pan = pan;
 
 #ifdef DEBUG_AUDIO_GENERATOR
@@ -583,7 +585,7 @@
         while (noteOffs.begin() != noteOffs.end() &&
                noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {
 
-            int eventFrame = noteOffs.begin()->frame;
+            sv_frame_t eventFrame = noteOffs.begin()->frame;
             if (eventFrame < reqStart) eventFrame = reqStart;
 
             off.frameOffset = eventFrame - reqStart;
@@ -609,10 +611,10 @@
     return got;
 }
 
-int
+sv_frame_t
 AudioGenerator::mixContinuousSynthModel(Model *model,
-                                        int startFrame,
-                                        int frames,
+                                        sv_frame_t startFrame,
+                                        sv_frame_t frames,
                                         float **buffer,
                                         float gain,
                                         float pan)
@@ -624,11 +626,11 @@
     SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
     if (stvm->getScaleUnits() != "Hz") return 0;
 
-    int blocks = frames / m_processingBlockSize;
+    int blocks = int(frames / m_processingBlockSize);
 
     //!!! todo: see comment in mixClipModel
 
-    int got = blocks * m_processingBlockSize;
+    sv_frame_t got = blocks * m_processingBlockSize;
 
 #ifdef DEBUG_AUDIO_GENERATOR
     cout << "mixModel [synth]: frames " << frames
@@ -639,7 +641,7 @@
 
     for (int i = 0; i < blocks; ++i) {
 
-        int reqStart = startFrame + i * m_processingBlockSize;
+        sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
 
         for (int c = 0; c < m_targetChannelCount; ++c) {
             bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioGenerator.h
--- a/audioio/AudioGenerator.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioGenerator.h	Tue Mar 10 17:02:52 2015 +0000
@@ -32,6 +32,8 @@
 #include
 #include
 
+#include "base/BaseTypes.h"
+
 class AudioGenerator : public QObject
 {
     Q_OBJECT
@@ -74,13 +76,13 @@
      * argument to all mixModel calls must be a multiple of this
      * value.
      */
-    virtual int getBlockSize() const;
+    virtual sv_frame_t getBlockSize() const;
 
     /**
      * Mix a single model into an output buffer.
      */
-    virtual int mixModel(Model *model, int startFrame, int frameCount,
-                         float **buffer, int fadeIn = 0, int fadeOut = 0);
+    virtual sv_frame_t mixModel(Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
+                                float **buffer, sv_frame_t fadeIn = 0, sv_frame_t fadeOut = 0);
 
     /**
      * Specify that only the given set of models should be played.
@@ -97,7 +99,7 @@
     void playClipIdChanged(const Playable *, QString);
 
 protected:
-    int m_sourceSampleRate;
+    sv_samplerate_t m_sourceSampleRate;
     int m_targetChannelCount;
     int m_waveType;
@@ -106,10 +108,10 @@
 
     struct NoteOff {
 
-        NoteOff(float _freq, int _frame) : frequency(_freq), frame(_frame) { }
+        NoteOff(float _freq, sv_frame_t _frame) : frequency(_freq), frame(_frame) { }
 
         float frequency;
-        int frame;
+        sv_frame_t frame;
 
         struct Comparator {
             bool operator()(const NoteOff &n1, const NoteOff &n2) const {
@@ -143,22 +145,22 @@
 
     static void initialiseSampleDir();
 
-    virtual int mixDenseTimeValueModel
-    (DenseTimeValueModel *model, int startFrame, int frameCount,
-     float **buffer, float gain, float pan, int fadeIn, int fadeOut);
+    virtual sv_frame_t mixDenseTimeValueModel
+    (DenseTimeValueModel *model, sv_frame_t startFrame, sv_frame_t frameCount,
+     float **buffer, float gain, float pan, sv_frame_t fadeIn, sv_frame_t fadeOut);
 
-    virtual int mixClipModel
-    (Model *model, int startFrame, int frameCount,
+    virtual sv_frame_t mixClipModel
+    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
      float **buffer, float gain, float pan);
 
-    virtual int mixContinuousSynthModel
-    (Model *model, int startFrame, int frameCount,
+    virtual sv_frame_t mixContinuousSynthModel
+    (Model *model, sv_frame_t startFrame, sv_frame_t frameCount,
      float **buffer, float gain, float pan);
 
-    static const int m_processingBlockSize;
+    static const sv_frame_t m_processingBlockSize;
 
     float **m_channelBuffer;
-    int m_channelBufSiz;
+    sv_frame_t m_channelBufSiz;
     int m_channelBufCount;
 };
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioJACKTarget.cpp
--- a/audioio/AudioJACKTarget.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioJACKTarget.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -439,14 +439,14 @@
         buffers[ch] = (float *)jack_port_get_buffer(m_outputs[ch], nframes);
     }
 
-    int received = 0;
+    sv_frame_t received = 0;
 
     if (m_source) {
         received = m_source->getSourceSamples(nframes, buffers);
     }
 
     for (int ch = 0; ch < (int)m_outputs.size(); ++ch) {
-        for (int i = received; i < (int)nframes; ++i) {
+        for (sv_frame_t i = received; i < nframes; ++i) {
             buffers[ch][i] = 0.0;
         }
     }
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioPortAudioTarget.cpp
--- a/audioio/AudioPortAudioTarget.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioPortAudioTarget.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -52,7 +52,7 @@
     m_bufferSize = 2048;
     m_sampleRate = 44100;
     if (m_source && (m_source->getSourceSampleRate() != 0)) {
-        m_sampleRate = m_source->getSourceSampleRate();
+        m_sampleRate = int(m_source->getSourceSampleRate());
     }
 
     PaStreamParameters op;
@@ -185,7 +185,7 @@
 int
 AudioPortAudioTarget::process(const void *, void *outputBuffer,
-                              int nframes,
+                              sv_frame_t nframes,
                               const PaStreamCallbackTimeInfo *,
                               PaStreamCallbackFlags)
 {
@@ -244,7 +244,7 @@
         }
     }
 
-    int received = m_source->getSourceSamples(nframes, tmpbuf);
+    sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf);
 
     float peakLeft = 0.0, peakRight = 0.0;
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioPortAudioTarget.h
--- a/audioio/AudioPortAudioTarget.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioPortAudioTarget.h	Tue Mar 10 17:02:52 2015 +0000
@@ -26,6 +26,8 @@
 
 #include "AudioCallbackPlayTarget.h"
 
+#include "base/BaseTypes.h"
+
 class AudioCallbackPlaySource;
 
 class AudioPortAudioTarget : public AudioCallbackPlayTarget
@@ -46,7 +48,7 @@
     virtual void sourceModelReplaced();
 
 protected:
-    int process(const void *input, void *output, int frames,
+    int process(const void *input, void *output, sv_frame_t frames,
                 const PaStreamCallbackTimeInfo *timeInfo,
                 PaStreamCallbackFlags statusFlags);
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioPulseAudioTarget.cpp
--- a/audioio/AudioPulseAudioTarget.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioPulseAudioTarget.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -57,7 +57,7 @@
     m_bufferSize = 20480;
     m_sampleRate = 44100;
     if (m_source && (m_source->getSourceSampleRate() != 0)) {
-        m_sampleRate = m_source->getSourceSampleRate();
+        m_sampleRate = int(m_source->getSourceSampleRate());
     }
     m_spec.rate = m_sampleRate;
     m_spec.channels = 2;
@@ -141,7 +141,7 @@
 
     pa_usec_t usec = 0;
     pa_stream_get_time(m_stream, &usec);
-    return usec / 1000000.f;
+    return double(usec) / 1000000.0;
 }
 
 void
@@ -151,19 +151,19 @@
 }
 
 void
-AudioPulseAudioTarget::streamWriteStatic(pa_stream *stream,
+AudioPulseAudioTarget::streamWriteStatic(pa_stream *,
                                          size_t length,
                                          void *data)
 {
     AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
 
-    assert(stream == target->m_stream);
+//    assert(stream == target->m_stream);
 
     target->streamWrite(length);
 }
 
 void
-AudioPulseAudioTarget::streamWrite(int requested)
+AudioPulseAudioTarget::streamWrite(sv_frame_t requested)
 {
 #ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
     cout << "AudioPulseAudioTarget::streamWrite(" << requested << ")" << endl;
@@ -175,21 +175,21 @@
     pa_usec_t latency = 0;
     int negative = 0;
     if (!pa_stream_get_latency(m_stream, &latency, &negative)) {
-        int latframes = (latency / 1000000.f) * float(m_sampleRate);
+        int latframes = int(double(latency) / 1000000.0 * double(m_sampleRate));
         if (latframes > 0) m_source->setTargetPlayLatency(latframes);
     }
 
     static float *output = 0;
     static float **tmpbuf = 0;
     static int tmpbufch = 0;
-    static int tmpbufsz = 0;
+    static sv_frame_t tmpbufsz = 0;
 
     int sourceChannels = m_source->getSourceChannelCount();
 
     // Because we offer pan, we always want at least 2 channels
     if (sourceChannels < 2) sourceChannels = 2;
 
-    int nframes = requested / (sourceChannels * sizeof(float));
+    sv_frame_t nframes = requested / (sourceChannels * sizeof(float));
 
     if (nframes > m_bufferSize) {
         cerr << "WARNING: AudioPulseAudioTarget::streamWrite: nframes " << nframes << " > m_bufferSize " << m_bufferSize << endl;
@@ -223,7 +223,7 @@
         output = new float[tmpbufsz * tmpbufch];
     }
 
-    int received = m_source->getSourceSamples(nframes, tmpbuf);
+    sv_frame_t received = m_source->getSourceSamples(nframes, tmpbuf);
 
 #ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY
     cerr << "requested " << nframes << ", received " << received << endl;
@@ -268,12 +268,12 @@
 }
 
 void
-AudioPulseAudioTarget::streamStateChangedStatic(pa_stream *stream,
+AudioPulseAudioTarget::streamStateChangedStatic(pa_stream *,
                                                 void *data)
 {
     AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
 
-    assert(stream == target->m_stream);
+//    assert(stream == target->m_stream);
 
     target->streamStateChanged();
 }
@@ -303,7 +303,7 @@
                 cerr << "AudioPulseAudioTarget::streamStateChanged: Failed to query latency" << endl;
             }
             cerr << "Latency = " << latency << " usec" << endl;
-            int latframes = (latency / 1000000.f) * float(m_sampleRate);
+            int latframes = int(double(latency) / 1000000.0 * m_sampleRate);
             cerr << "that's " << latframes << " frames" << endl;
 
             const pa_buffer_attr *attr;
@@ -334,12 +334,12 @@
 }
 
 void
-AudioPulseAudioTarget::contextStateChangedStatic(pa_context *context,
+AudioPulseAudioTarget::contextStateChangedStatic(pa_context *,
                                                  void *data)
 {
     AudioPulseAudioTarget *target = (AudioPulseAudioTarget *)data;
 
-    assert(context == target->m_context);
+//    assert(context == target->m_context);
 
     target->contextStateChanged();
 }
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/AudioPulseAudioTarget.h
--- a/audioio/AudioPulseAudioTarget.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/AudioPulseAudioTarget.h	Tue Mar 10 17:02:52 2015 +0000
@@ -46,7 +46,7 @@
     virtual void sourceModelReplaced();
 
 protected:
-    void streamWrite(int);
+    void streamWrite(sv_frame_t);
     void streamStateChanged();
     void contextStateChanged();
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/ClipMixer.cpp
--- a/audioio/ClipMixer.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/ClipMixer.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -20,7 +20,7 @@
 
 #include "base/Debug.h"
 
-ClipMixer::ClipMixer(int channels, int sampleRate, int blockSize) :
+ClipMixer::ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize) :
     m_channels(channels),
     m_sampleRate(sampleRate),
     m_blockSize(blockSize),
@@ -43,7 +43,7 @@
 }
 
 bool
-ClipMixer::loadClipData(QString path, float f0, float level)
+ClipMixer::loadClipData(QString path, double f0, double level)
 {
     if (m_clipData) {
         cerr << "ClipMixer::loadClipData: Already have clip loaded" << endl;
@@ -53,7 +53,7 @@
     SF_INFO info;
     SNDFILE *file;
     float *tmpFrames;
-    int i;
+    sv_frame_t i;
 
     info.format = 0;
     file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info);
@@ -83,7 +83,7 @@
         int j;
         m_clipData[i] = 0.0f;
         for (j = 0; j < info.channels; ++j) {
-            m_clipData[i] += tmpFrames[i * info.channels + j] * level;
+            m_clipData[i] += tmpFrames[i * info.channels + j] * float(level);
         }
     }
@@ -102,19 +102,19 @@
     m_playing.clear();
 }
 
-float
-ClipMixer::getResampleRatioFor(float frequency)
+double
+ClipMixer::getResampleRatioFor(double frequency)
 {
     if (!m_clipData || !m_clipRate) return 1.0;
-    float pitchRatio = m_clipF0 / frequency;
-    float resampleRatio = m_sampleRate / m_clipRate;
+    double pitchRatio = m_clipF0 / frequency;
+    double resampleRatio = m_sampleRate / m_clipRate;
     return pitchRatio * resampleRatio;
 }
 
-int
-ClipMixer::getResampledClipDuration(float frequency)
+sv_frame_t
+ClipMixer::getResampledClipDuration(double frequency)
 {
-    return int(ceil(m_clipLength * getResampleRatioFor(frequency)));
+    return sv_frame_t(ceil(double(m_clipLength) * getResampleRatioFor(frequency)));
 }
 
 void
@@ -146,12 +146,12 @@
         levels[c] = note.level * gain;
     }
     if (note.pan != 0.0 && m_channels == 2) {
-        levels[0] *= 1.0 - note.pan;
-        levels[1] *= note.pan + 1.0;
+        levels[0] *= 1.0f - note.pan;
+        levels[1] *= note.pan + 1.0f;
     }
 
-    int start = note.frameOffset;
-    int durationHere = m_blockSize;
+    sv_frame_t start = note.frameOffset;
+    sv_frame_t durationHere = m_blockSize;
     if (start > 0) durationHere = m_blockSize - start;
 
     bool ending = false;
@@ -167,7 +167,7 @@
         }
     }
 
-    int clipDuration = getResampledClipDuration(note.frequency);
+    sv_frame_t clipDuration = getResampledClipDuration(note.frequency);
     if (start + clipDuration > 0) {
         if (start < 0 && start + clipDuration < durationHere) {
             durationHere = start + clipDuration;
@@ -199,46 +199,46 @@
 ClipMixer::mixNote(float **toBuffers,
                    float *levels,
                    float frequency,
-                   int sourceOffset,
-                   int targetOffset,
-                   int sampleCount,
+                   sv_frame_t sourceOffset,
+                   sv_frame_t targetOffset,
+                   sv_frame_t sampleCount,
                    bool isEnd)
 {
     if (!m_clipData) return;
 
-    float ratio = getResampleRatioFor(frequency);
+    double ratio = getResampleRatioFor(frequency);
 
-    float releaseTime = 0.01;
-    int releaseSampleCount = round(releaseTime * m_sampleRate);
+    double releaseTime = 0.01;
+    sv_frame_t releaseSampleCount = sv_frame_t(round(releaseTime * m_sampleRate));
     if (releaseSampleCount > sampleCount) {
         releaseSampleCount = sampleCount;
     }
-    float releaseFraction = 1.f/releaseSampleCount;
+    double releaseFraction = 1.0/double(releaseSampleCount);
 
-    for (int i = 0; i < sampleCount; ++i) {
+    for (sv_frame_t i = 0; i < sampleCount; ++i) {
 
-        int s = sourceOffset + i;
+        sv_frame_t s = sourceOffset + i;
 
-        float os = s / ratio;
-        int osi = int(floor(os));
+        double os = double(s) / ratio;
+        sv_frame_t osi = sv_frame_t(floor(os));
 
        //!!! just linear interpolation for now (same as SV's sample
        //!!! player). a small sinc kernel would be better and
        //!!! probably "good enough"
-        float value = 0.f;
+        double value = 0.0;
         if (osi < m_clipLength) {
             value += m_clipData[osi];
         }
         if (osi + 1 < m_clipLength) {
-            value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - osi);
+            value += (m_clipData[osi + 1] - m_clipData[osi]) * (os - double(osi));
         }
 
         if (isEnd && i + releaseSampleCount > sampleCount) {
-            value *= releaseFraction * (sampleCount - i); // linear ramp for release
+            value *= releaseFraction * double(sampleCount - i); // linear ramp for release
         }
 
         for (int c = 0; c < m_channels; ++c) {
-            toBuffers[c][targetOffset + i] += levels[c] * value;
+            toBuffers[c][targetOffset + i] += float(levels[c] * value);
         }
     }
 }
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/ClipMixer.h
--- a/audioio/ClipMixer.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/ClipMixer.h	Tue Mar 10 17:02:52 2015 +0000
@@ -19,6 +19,8 @@
 #include
 #include
 
+#include "base/BaseTypes.h"
+
 /**
  * Mix in synthetic notes produced by resampling a prerecorded
  * clip. (i.e. this is an implementation of a digital sampler in the
@@ -29,7 +31,7 @@
 class ClipMixer
 {
 public:
-    ClipMixer(int channels, int sampleRate, int blockSize);
+    ClipMixer(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize);
     ~ClipMixer();
 
     void setChannelCount(int channels);
@@ -41,19 +43,19 @@
      * and should be scaled by level (in the range 0-1) when playing
      * back.
      */
-    bool loadClipData(QString clipFilePath, float clipF0, float level);
+    bool loadClipData(QString clipFilePath, double clipF0, double level);
 
     void reset(); // discarding any playing notes
 
     struct NoteStart {
-        int frameOffset; // within current processing block
+        sv_frame_t frameOffset; // within current processing block
        float frequency; // Hz
        float level;     // volume in range (0,1]
        float pan;       // range [-1,1]
     };
 
     struct NoteEnd {
-        int frameOffset; // in current processing block
+        sv_frame_t frameOffset; // in current processing block
        float frequency; // matching note start
     };
@@ -64,27 +66,27 @@
 
 private:
     int m_channels;
-    int m_sampleRate;
-    int m_blockSize;
+    sv_samplerate_t m_sampleRate;
+    sv_frame_t m_blockSize;
 
     QString m_clipPath;
 
     float *m_clipData;
-    int m_clipLength;
-    float m_clipF0;
-    float m_clipRate;
+    sv_frame_t m_clipLength;
+    double m_clipF0;
+    sv_samplerate_t m_clipRate;
 
     std::vector<NoteStart> m_playing;
 
-    float getResampleRatioFor(float frequency);
-    int getResampledClipDuration(float frequency);
+    double getResampleRatioFor(double frequency);
+    sv_frame_t getResampledClipDuration(double frequency);
 
     void mixNote(float **toBuffers,
                  float *levels,
                  float frequency,
-                 int sourceOffset, // within resampled note
-                 int targetOffset, // within target buffer
-                 int sampleCount,
+                 sv_frame_t sourceOffset, // within resampled note
+                 sv_frame_t targetOffset, // within target buffer
+                 sv_frame_t sampleCount,
                  bool isEnd);
 };
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/ContinuousSynth.cpp
--- a/audioio/ContinuousSynth.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/ContinuousSynth.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -19,11 +19,11 @@
 
 #include
 
-ContinuousSynth::ContinuousSynth(int channels, int sampleRate, int blockSize, int waveType) :
+ContinuousSynth::ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType) :
     m_channels(channels),
     m_sampleRate(sampleRate),
     m_blockSize(blockSize),
-    m_prevF0(-1.f),
+    m_prevF0(-1.0),
    m_phase(0.0),
    m_wavetype(waveType) // 0: 3 sinusoids, 1: 1 sinusoid, 2: sawtooth, 3: square
 {
@@ -40,46 +40,47 @@
 }
 
 void
-ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0)
+ContinuousSynth::mix(float **toBuffers, float gain, float pan, float f0f)
 {
-    if (f0 == 0.f) f0 = m_prevF0;
+    double f0(f0f);
+    if (f0 == 0.0) f0 = m_prevF0;
 
-    bool wasOn = (m_prevF0 > 0.f);
-    bool nowOn = (f0 > 0.f);
+    bool wasOn = (m_prevF0 > 0.0);
+    bool nowOn = (f0 > 0.0);
 
     if (!nowOn && !wasOn) {
-	m_phase = 0;
-	return;
+        m_phase = 0;
+        return;
     }
 
-    int fadeLength = 100; // samples
+    sv_frame_t fadeLength = 100;
 
     float *levels = new float[m_channels];
 
     for (int c = 0; c < m_channels; ++c) {
-        levels[c] = gain * 0.5; // scale gain otherwise too loud compared to source
+        levels[c] = gain * 0.5f; // scale gain otherwise too loud compared to source
     }
     if (pan != 0.0 && m_channels == 2) {
-        levels[0] *= 1.0 - pan;
-        levels[1] *= pan + 1.0;
+        levels[0] *= 1.0f - pan;
+        levels[1] *= pan + 1.0f;
     }
 
//    cerr << "ContinuousSynth::mix: f0 = " << f0 << " (from " << m_prevF0 << "), phase = " << m_phase << endl;
 
-    for (int i = 0; i < m_blockSize; ++i) {
+    for (sv_frame_t i = 0; i < m_blockSize; ++i) {
 
        double fHere = (nowOn ? f0 : m_prevF0);
 
        if (wasOn && nowOn && (f0 != m_prevF0) && (i < fadeLength)) {
            // interpolate the frequency shift
-           fHere = m_prevF0 + ((f0 - m_prevF0) * i) / fadeLength;
+           fHere = m_prevF0 + ((f0 - m_prevF0) * double(i)) / double(fadeLength);
        }
 
        double phasor = (fHere * 2 * M_PI) / m_sampleRate;
 
        m_phase = m_phase + phasor;
 
-       int harmonics = (m_sampleRate / 4) / fHere - 1;
+       int harmonics = int((m_sampleRate / 4) / fHere - 1);
        if (harmonics < 1) harmonics = 1;
 
        switch (m_wavetype) {
@@ -95,7 +96,6 @@
            break;
        }
 
-
        for (int h = 0; h < harmonics; ++h) {
 
            double v = 0;
@@ -129,15 +129,15 @@
 
            if (!wasOn && i < fadeLength) {
                // fade in
-               v = v * (i / double(fadeLength));
+               v = v * (double(i) / double(fadeLength));
            } else if (!nowOn) {
                // fade out
                if (i > fadeLength) v = 0;
-               else v = v * (1.0 - (i / double(fadeLength)));
+               else v = v * (1.0 - (double(i) / double(fadeLength)));
            }
 
            for (int c = 0; c < m_channels; ++c) {
-               toBuffers[c][i] += levels[c] * v;
+               toBuffers[c][i] += float(levels[c] * v);
            }
        }
     }
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/ContinuousSynth.h
--- a/audioio/ContinuousSynth.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/ContinuousSynth.h	Tue Mar 10 17:02:52 2015 +0000
@@ -15,6 +15,8 @@
 #ifndef CONTINUOUS_SYNTH_H
 #define CONTINUOUS_SYNTH_H
 
+#include "base/BaseTypes.h"
+
 /**
  * Mix into a target buffer a signal synthesised so as to sound at a
  * specific frequency. The frequency may change with each processing
@@ -24,7 +26,7 @@
 class ContinuousSynth
 {
 public:
-    ContinuousSynth(int channels, int sampleRate, int blockSize, int waveType);
+    ContinuousSynth(int channels, sv_samplerate_t sampleRate, sv_frame_t blockSize, int waveType);
     ~ContinuousSynth();
 
     void setChannelCount(int channels);
@@ -45,14 +47,14 @@
      * sound switches on and off cleanly.
      */
     void mix(float **toBuffers,
-	     float gain,
-	     float pan,
-	     float f0);
+             float gain,
+             float pan,
+             float f0);
 
 private:
     int m_channels;
-    int m_sampleRate;
-    int m_blockSize;
+    sv_samplerate_t m_sampleRate;
+    sv_frame_t m_blockSize;
 
     double m_prevF0;
     double m_phase;
diff -r 618d5816b04d -r 72c662fe7ea3 audioio/PlaySpeedRangeMapper.cpp
--- a/audioio/PlaySpeedRangeMapper.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/audioio/PlaySpeedRangeMapper.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -49,8 +49,8 @@
 
     int half = (m_maxpos + m_minpos) / 2;
 
-    factor = sqrtf((factor - 1.0) * 1000.f);
-    int position = lrintf(((factor * (half - m_minpos)) / 100.0) + m_minpos);
+    factor = sqrt((factor - 1.0) * 1000.0);
+    int position = int(lrint(((factor * (half - m_minpos)) / 100.0) + m_minpos));
 
     if (slow) {
         position = half - position;
diff -r 618d5816b04d -r 72c662fe7ea3 configure
--- a/configure	Tue Mar 10 13:22:10 2015 +0000
+++ b/configure	Tue Mar 10 17:02:52 2015 +0000
@@ -4343,9 +4343,10 @@
 CXXFLAGS_MINIMAL="$AUTOCONF_CXXFLAGS"
 
 if test "x$GCC" = "xyes"; then
-   CXXFLAGS_DEBUG="-std=c++11 -Wall -Wextra -Werror -Woverloaded-virtual -Wformat-nonliteral -Wformat-security -Winit-self -Wswitch-enum -g -pipe"
-   CXXFLAGS_RELEASE="-std=c++11 -g0 -O2 -Wall -pipe"
-   CXXFLAGS_MINIMAL="-std=c++11 -g0 -O0"
+   CXXFLAGS_ANY="-std=c++11 -Wall -Wextra -Werror -Woverloaded-virtual -Wformat-nonliteral -Wformat-security -Winit-self -Wswitch-enum -Wconversion -pipe"
+   CXXFLAGS_DEBUG="$CXXFLAGS_ANY -g"
+   CXXFLAGS_RELEASE="$CXXFLAGS_ANY -g0 -O2"
+   CXXFLAGS_MINIMAL="$CXXFLAGS_ANY -g0 -O0"
 fi
 
 CXXFLAGS_BUILD="$CXXFLAGS_RELEASE"
diff -r 618d5816b04d -r 72c662fe7ea3 configure.ac
--- a/configure.ac	Tue Mar 10 13:22:10 2015 +0000
+++ b/configure.ac	Tue Mar 10 17:02:52 2015 +0000
@@ -53,9 +53,10 @@
 CXXFLAGS_MINIMAL="$AUTOCONF_CXXFLAGS"
 
 if test "x$GCC" = "xyes"; then
-   CXXFLAGS_DEBUG="-std=c++11 -Wall -Wextra -Werror -Woverloaded-virtual -Wformat-nonliteral -Wformat-security -Winit-self -Wswitch-enum -g -pipe"
-   CXXFLAGS_RELEASE="-std=c++11 -g0 -O2 -Wall -pipe"
-   CXXFLAGS_MINIMAL="-std=c++11 -g0 -O0"
+   CXXFLAGS_ANY="-std=c++11 -Wall -Wextra -Werror -Woverloaded-virtual -Wformat-nonliteral -Wformat-security -Winit-self -Wswitch-enum -Wconversion -pipe"
+   CXXFLAGS_DEBUG="$CXXFLAGS_ANY -g"
+   CXXFLAGS_RELEASE="$CXXFLAGS_ANY -g0 -O2"
+   CXXFLAGS_MINIMAL="$CXXFLAGS_ANY -g0 -O0"
 fi
 
 CXXFLAGS_BUILD="$CXXFLAGS_RELEASE"
diff -r 618d5816b04d -r 72c662fe7ea3 framework/Document.cpp
--- a/framework/Document.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/framework/Document.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -734,7 +734,7 @@
     applied.setPluginVersion
         (TransformFactory::getInstance()->
          getDefaultTransformFor(applied.getIdentifier(),
-                                lrintf(applied.getSampleRate()))
+                                applied.getSampleRate())
          .getPluginVersion());
 
     if (!model) {
diff -r 618d5816b04d -r 72c662fe7ea3 framework/MainWindowBase.cpp
--- a/framework/MainWindowBase.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/framework/MainWindowBase.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -178,7 +178,7 @@
 
     // set a sensible default font size for views -- cannot do this
     // in Preferences, which is in base and not supposed to use QtGui
-    int viewFontSize = QApplication::font().pointSize() * 0.9;
+    int viewFontSize = int(QApplication::font().pointSize() * 0.9);
     QSettings settings;
     settings.beginGroup("Preferences");
     viewFontSize = settings.value("view-font-size", viewFontSize).toInt();
@@ -218,8 +218,8 @@
     m_playSource = new AudioCallbackPlaySource(m_viewManager,
                                                QApplication::applicationName());
 
-    connect(m_playSource, SIGNAL(sampleRateMismatch(int, int, bool)),
-            this, SLOT(sampleRateMismatch(int, int, bool)));
+    connect(m_playSource, SIGNAL(sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool)),
+            this, SLOT(sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool)));
     connect(m_playSource, SIGNAL(audioOverloadPluginDisabled()),
             this, SLOT(audioOverloadPluginDisabled()));
     connect(m_playSource, SIGNAL(audioTimeStretchMultiChannelDisabled()),
@@ -2380,7 +2380,7 @@
     else pixels = 1;
     if (pixels > 4) pixels -= 4;
 
-    int zoomLevel = (end - start) / pixels;
+    int zoomLevel = int((end - start) / pixels);
     if (zoomLevel < 1) zoomLevel = 1;
 
     currentPane->setZoomLevel(zoomLevel);
diff -r 618d5816b04d -r 72c662fe7ea3 framework/MainWindowBase.h
--- a/framework/MainWindowBase.h	Tue Mar 10 13:22:10 2015 +0000
+++ b/framework/MainWindowBase.h	Tue Mar 10 17:02:52 2015 +0000
@@ -213,7 +213,7 @@
     virtual void playSelectionToggled();
     virtual void playSoloToggled();
 
-    virtual void sampleRateMismatch(int, int, bool) = 0;
+    virtual void sampleRateMismatch(sv_samplerate_t, sv_samplerate_t, bool) = 0;
     virtual void audioOverloadPluginDisabled() = 0;
     virtual void audioTimeStretchMultiChannelDisabled() = 0;
diff -r 618d5816b04d -r 72c662fe7ea3 framework/SVFileReader.cpp
--- a/framework/SVFileReader.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/framework/SVFileReader.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -450,7 +450,7 @@
 
     SVDEBUG << "SVFileReader::readModel: model name \"" << name << "\"" << endl;
 
-    READ_MANDATORY(int, sampleRate, toInt);
+    READ_MANDATORY(double, sampleRate, toDouble);
 
     QString type = attributes.value("type").trimmed();
     bool isMainModel = (attributes.value("mainModel").trimmed() == "true");
@@ -478,7 +478,7 @@
 
            file.waitForData();
 
-           int rate = sampleRate;
+           sv_samplerate_t rate = sampleRate;
 
            if (Preferences::getInstance()->getFixedSampleRate() != 0) {
                rate = Preferences::getInstance()->getFixedSampleRate();
diff -r 618d5816b04d -r 72c662fe7ea3 framework/TransformUserConfigurator.cpp
--- a/framework/TransformUserConfigurator.cpp	Tue Mar 10 13:22:10 2015 +0000
+++ b/framework/TransformUserConfigurator.cpp	Tue Mar 10 17:02:52 2015 +0000
@@ -46,8 +46,8 @@
     if (plugin && plugin->getType() == "Feature Extraction Plugin") {
        Vamp::Plugin *vp = static_cast<Vamp::Plugin *>(plugin);
        SVDEBUG << "TransformUserConfigurator::getChannelRange: is a VP" << endl;
-       minChannels = vp->getMinChannelCount();
-       maxChannels = vp->getMaxChannelCount();
+       minChannels = int(vp->getMinChannelCount());
+       maxChannels = int(vp->getMaxChannelCount());
        return true;
     } else {
        SVDEBUG << "TransformUserConfigurator::getChannelRange: is not a VP" << endl;
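
Editor's note (not part of the changeset): the dedicated types used throughout this patch come from base/BaseTypes.h, which is included by several of the hunks above but whose contents are not shown here. The following is a minimal sketch of what those declarations are assumed to look like; treat the exact typedefs as an assumption rather than a copy of the header.

    // Sketch only: assumed shape of the base/BaseTypes.h aliases referenced above.
    // sv_frame_t is assumed to be a signed 64-bit frame count, so frame arithmetic
    // does not overflow 32 bits; sv_samplerate_t is assumed to be a double, so
    // non-integer rates and rate ratios survive division without truncation.
    #include <cstdint>
    typedef int64_t sv_frame_t;       // audio frame index or frame count
    typedef double  sv_samplerate_t;  // sample rate in Hz

This also explains the explicit narrowing casts added at API boundaries, such as int(m_writeBuffers->size()) and int(getTargetSampleRate()): with -Wconversion now added to the common warning flags (see the configure and configure.ac hunks) and -Werror already in force, any remaining implicit narrowing from the wider dedicated types would fail the build, so each narrowing point is made deliberate and visible.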