svapp: changeset 366:0876ea394902 warnfix_no_size_t
Remove size_t's, fix compiler warnings
| author   | Chris Cannam |
|----------|--------------|
| date     | Tue, 17 Jun 2014 16:23:06 +0100 |
| parents  | e7a3fa8f4eec |
| children | 1e4fa2007e61 |
| files    | audioio/AudioCallbackPlaySource.cpp audioio/AudioCallbackPlaySource.h audioio/AudioGenerator.cpp audioio/AudioGenerator.h audioio/AudioJACKTarget.cpp audioio/AudioPortAudioTarget.cpp audioio/AudioPulseAudioTarget.cpp audioio/AudioPulseAudioTarget.h audioio/ClipMixer.cpp framework/Document.cpp framework/MainWindowBase.cpp framework/MainWindowBase.h framework/SVFileReader.cpp framework/TransformUserConfigurator.cpp framework/TransformUserConfigurator.h |
| diffstat | 15 files changed, 409 insertions(+), 402 deletions(-) |
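Before the per-file diffs: most of this changeset replaces `size_t` with `int` for frame and sample counts, and adds explicit `(int)` casts where a signed value is compared against an unsigned `size()` or plugin count. As a rough, hypothetical illustration (none of these names are from svapp) of the `-Wsign-compare` warning being silenced and the cast pattern used throughout the diff:

```cpp
#include <vector>

// Sketch only: channelCount/buffers are made-up names, not svapp code.
int clampToBufferCount(int channelCount, const std::vector<float *> &buffers)
{
    // Before the change, comparing signed against unsigned drew
    // "comparison between signed and unsigned integer expressions":
    //
    //     if (channelCount > buffers.size()) ...
    //
    // The diff's fix is to cast the unsigned quantity to int, e.g.
    //     if ((int)m_writeBuffers->size() < getTargetChannelCount()) ...
    if (channelCount > (int)buffers.size()) {
        channelCount = (int)buffers.size();
    }
    return channelCount;
}
```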
--- a/audioio/AudioCallbackPlaySource.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioCallbackPlaySource.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -37,7 +37,7 @@ //#define DEBUG_AUDIO_PLAY_SOURCE 1 //#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1 -static const size_t DEFAULT_RING_BUFFER_SIZE = 131071; +static const int DEFAULT_RING_BUFFER_SIZE = 131071; AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager, QString clientName) : @@ -127,7 +127,7 @@ delete m_audioGenerator; - for (size_t i = 0; i < m_stretcherInputCount; ++i) { + for (int i = 0; i < m_stretcherInputCount; ++i) { delete[] m_stretcherInputs[i]; } delete[] m_stretcherInputSizes; @@ -159,7 +159,7 @@ bool buffersChanged = false, srChanged = false; - size_t modelChannels = 1; + int modelChannels = 1; DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model); if (dtvm) modelChannels = dtvm->getChannelCount(); if (modelChannels > m_sourceChannelCount) { @@ -220,7 +220,7 @@ } } - if (!m_writeBuffers || (m_writeBuffers->size() < getTargetChannelCount())) { + if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) { clearRingBuffers(true, getTargetChannelCount()); buffersChanged = true; } else { @@ -255,8 +255,8 @@ emit modelReplaced(); } - connect(model, SIGNAL(modelChanged(size_t, size_t)), - this, SLOT(modelChanged(size_t, size_t))); + connect(model, SIGNAL(modelChanged(int, int)), + this, SLOT(modelChanged(int, int))); #ifdef DEBUG_AUDIO_PLAY_SOURCE cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl; @@ -266,7 +266,7 @@ } void -AudioCallbackPlaySource::modelChanged(size_t startFrame, size_t endFrame) +AudioCallbackPlaySource::modelChanged(int , int endFrame) { #ifdef DEBUG_AUDIO_PLAY_SOURCE SVDEBUG << "AudioCallbackPlaySource::modelChanged(" << startFrame << "," << endFrame << ")" << endl; @@ -286,8 +286,8 @@ cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl; #endif - disconnect(model, SIGNAL(modelChanged(size_t, size_t)), - this, SLOT(modelChanged(size_t, size_t))); + disconnect(model, SIGNAL(modelChanged(int, int)), + this, SLOT(modelChanged(int, int))); m_models.erase(model); @@ -301,7 +301,7 @@ m_sourceSampleRate = 0; } - size_t lastEnd = 0; + int lastEnd = 0; for (std::set<Model *>::const_iterator i = m_models.begin(); i != m_models.end(); ++i) { #ifdef DEBUG_AUDIO_PLAY_SOURCE @@ -351,7 +351,7 @@ } void -AudioCallbackPlaySource::clearRingBuffers(bool haveLock, size_t count) +AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count) { if (!haveLock) m_mutex.lock(); @@ -369,7 +369,7 @@ m_writeBuffers = new RingBufferVector; - for (size_t i = 0; i < count; ++i) { + for (int i = 0; i < count; ++i) { m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize)); } @@ -382,7 +382,7 @@ } void -AudioCallbackPlaySource::play(size_t startFrame) +AudioCallbackPlaySource::play(int startFrame) { if (m_viewManager->getPlaySelectionMode() && !m_viewManager->getSelections().empty()) { @@ -425,7 +425,7 @@ m_readBufferFill = m_writeBufferFill = startFrame; if (m_readBuffers) { - for (size_t c = 0; c < getTargetChannelCount(); ++c) { + for (int c = 0; c < getTargetChannelCount(); ++c) { RingBuffer<float> *rb = getReadRingBuffer(c); #ifdef DEBUG_AUDIO_PLAY_SOURCE cerr << "reset ring buffer for channel " << c << endl; @@ -550,7 +550,7 @@ } void -AudioCallbackPlaySource::setTarget(AudioCallbackPlayTarget *target, size_t size) +AudioCallbackPlaySource::setTarget(AudioCallbackPlayTarget *target, int size) { m_target = target; cout << 
"AudioCallbackPlaySource::setTarget: Block size -> " << size << endl; @@ -569,7 +569,7 @@ } } -size_t +int AudioCallbackPlaySource::getTargetBlockSize() const { // cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl; @@ -577,65 +577,62 @@ } void -AudioCallbackPlaySource::setTargetPlayLatency(size_t latency) +AudioCallbackPlaySource::setTargetPlayLatency(int latency) { m_playLatency = latency; } -size_t +int AudioCallbackPlaySource::getTargetPlayLatency() const { return m_playLatency; } -size_t +int AudioCallbackPlaySource::getCurrentPlayingFrame() { // This method attempts to estimate which audio sample frame is // "currently coming through the speakers". - size_t targetRate = getTargetSampleRate(); - size_t latency = m_playLatency; // at target rate + int targetRate = getTargetSampleRate(); + int latency = m_playLatency; // at target rate RealTime latency_t = RealTime::frame2RealTime(latency, targetRate); return getCurrentFrame(latency_t); } -size_t +int AudioCallbackPlaySource::getCurrentBufferedFrame() { return getCurrentFrame(RealTime::zeroTime); } -size_t +int AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t) { - bool resample = false; - double resampleRatio = 1.0; - // We resample when filling the ring buffer, and time-stretch when // draining it. The buffer contains data at the "target rate" and // the latency provided by the target is also at the target rate. // Because of the multiple rates involved, we do the actual // calculation using RealTime instead. - size_t sourceRate = getSourceSampleRate(); - size_t targetRate = getTargetSampleRate(); + int sourceRate = getSourceSampleRate(); + int targetRate = getTargetSampleRate(); if (sourceRate == 0 || targetRate == 0) return 0; - size_t inbuffer = 0; // at target rate + int inbuffer = 0; // at target rate - for (size_t c = 0; c < getTargetChannelCount(); ++c) { + for (int c = 0; c < getTargetChannelCount(); ++c) { RingBuffer<float> *rb = getReadRingBuffer(c); if (rb) { - size_t here = rb->getReadSpace(); + int here = rb->getReadSpace(); if (c == 0 || here < inbuffer) inbuffer = here; } } - size_t readBufferFill = m_readBufferFill; - size_t lastRetrievedBlockSize = m_lastRetrievedBlockSize; + int readBufferFill = m_readBufferFill; + int lastRetrievedBlockSize = m_lastRetrievedBlockSize; double lastRetrievalTimestamp = m_lastRetrievalTimestamp; double currentTime = 0.0; if (m_target) currentTime = m_target->getCurrentTime(); @@ -644,7 +641,7 @@ RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate); - size_t stretchlat = 0; + int stretchlat = 0; double timeRatio = 1.0; if (m_timeStretcher) { @@ -704,8 +701,6 @@ cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl; #endif - RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate); - // Normally the range lists should contain at least one item each // -- if playback is unconstrained, that item should report the // entire source audio duration. 
@@ -720,14 +715,14 @@ - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t + sincerequest_t; if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; - size_t frame = RealTime::realTime2Frame(playing_t, sourceRate); + int frame = RealTime::realTime2Frame(playing_t, sourceRate); return m_viewManager->alignPlaybackFrameToReference(frame); } int inRange = 0; int index = 0; - for (size_t i = 0; i < m_rangeStarts.size(); ++i) { + for (int i = 0; i < (int)m_rangeStarts.size(); ++i) { if (bufferedto_t >= m_rangeStarts[i]) { inRange = index; } else { @@ -736,7 +731,7 @@ ++index; } - if (inRange >= m_rangeStarts.size()) inRange = m_rangeStarts.size()-1; + if (inRange >= (int)m_rangeStarts.size()) inRange = m_rangeStarts.size()-1; RealTime playing_t = bufferedto_t; @@ -805,7 +800,7 @@ #endif if (!looping) { - if (inRange == m_rangeStarts.size()-1 && + if (inRange == (int)m_rangeStarts.size()-1 && playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) { cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl; stop(); @@ -814,7 +809,7 @@ if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; - size_t frame = RealTime::realTime2Frame(playing_t, sourceRate); + int frame = RealTime::realTime2Frame(playing_t, sourceRate); if (m_lastCurrentFrame > 0 && !looping) { if (frame < m_lastCurrentFrame) { @@ -835,7 +830,7 @@ m_rangeStarts.clear(); m_rangeDurations.clear(); - size_t sourceRate = getSourceSampleRate(); + int sourceRate = getSourceSampleRate(); if (sourceRate == 0) return; RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate); @@ -897,7 +892,7 @@ } void -AudioCallbackPlaySource::setTargetSampleRate(size_t sr) +AudioCallbackPlaySource::setTargetSampleRate(int sr) { bool first = (m_targetSampleRate == 0); @@ -1014,27 +1009,27 @@ clearRingBuffers(); } -size_t +int AudioCallbackPlaySource::getTargetSampleRate() const { if (m_targetSampleRate) return m_targetSampleRate; else return getSourceSampleRate(); } -size_t +int AudioCallbackPlaySource::getSourceChannelCount() const { return m_sourceChannelCount; } -size_t +int AudioCallbackPlaySource::getTargetChannelCount() const { if (m_sourceChannelCount < 2) return 2; return m_sourceChannelCount; } -size_t +int AudioCallbackPlaySource::getSourceSampleRate() const { return m_sourceSampleRate; @@ -1062,8 +1057,8 @@ RubberBandStretcher::OptionProcessRealTime, factor); m_stretcherInputs = new float *[m_stretcherInputCount]; - m_stretcherInputSizes = new size_t[m_stretcherInputCount]; - for (size_t c = 0; c < m_stretcherInputCount; ++c) { + m_stretcherInputSizes = new int[m_stretcherInputCount]; + for (int c = 0; c < m_stretcherInputCount; ++c) { m_stretcherInputSizes[c] = 16384; m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; } @@ -1074,8 +1069,8 @@ emit activity(tr("Change time-stretch factor to %1").arg(factor)); } -size_t -AudioCallbackPlaySource::getSourceSamples(size_t ucount, float **buffer) +int +AudioCallbackPlaySource::getSourceSamples(int ucount, float **buffer) { int count = ucount; @@ -1083,7 +1078,7 @@ #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl; #endif - for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) { + for (int ch = 0; ch < getTargetChannelCount(); ++ch) { for (int i = 0; i < count; ++i) { buffer[ch][i] = 0.0; } 
@@ -1098,7 +1093,7 @@ // Ensure that all buffers have at least the amount of data we // need -- else reduce the size of our requests correspondingly - for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) { + for (int ch = 0; ch < getTargetChannelCount(); ++ch) { RingBuffer<float> *rb = getReadRingBuffer(ch); @@ -1110,7 +1105,7 @@ break; } - size_t rs = rb->getReadSpace(); + int rs = rb->getReadSpace(); if (rs < count) { #ifdef DEBUG_AUDIO_PLAY_SOURCE cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " @@ -1161,7 +1156,7 @@ int got = 0; - for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) { + for (int ch = 0; ch < getTargetChannelCount(); ++ch) { RingBuffer<float> *rb = getReadRingBuffer(ch); @@ -1169,7 +1164,7 @@ // this is marginally more likely to leave our channels in // sync after a processing failure than just passing "count": - size_t request = count; + int request = count; if (ch > 0) request = got; got = rb->read(buffer[ch], request); @@ -1179,7 +1174,7 @@ #endif } - for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) { + for (int ch = 0; ch < getTargetChannelCount(); ++ch) { for (int i = got; i < count; ++i) { buffer[ch][i] = 0.0; } @@ -1197,27 +1192,27 @@ return got; } - size_t channels = getTargetChannelCount(); - size_t available; + int channels = getTargetChannelCount(); + int available; int warned = 0; - size_t fedToStretcher = 0; + int fedToStretcher = 0; // The input block for a given output is approx output / ratio, // but we can't predict it exactly, for an adaptive timestretcher. while ((available = ts->available()) < count) { - size_t reqd = lrintf((count - available) / ratio); - reqd = std::max(reqd, ts->getSamplesRequired()); + int reqd = lrintf((count - available) / ratio); + reqd = std::max(reqd, (int)ts->getSamplesRequired()); if (reqd == 0) reqd = 1; - size_t got = reqd; + int got = reqd; #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl; #endif - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { if (c >= m_stretcherInputCount) continue; if (reqd > m_stretcherInputSizes[c]) { if (c == 0) { @@ -1229,11 +1224,11 @@ } } - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { if (c >= m_stretcherInputCount) continue; RingBuffer<float> *rb = getReadRingBuffer(c); if (rb) { - size_t gotHere; + int gotHere; if (stretchChannels == 1 && c > 0) { gotHere = rb->readAdding(m_stretcherInputs[0], got); } else { @@ -1290,25 +1285,25 @@ } void -AudioCallbackPlaySource::applyAuditioningEffect(size_t count, float **buffers) +AudioCallbackPlaySource::applyAuditioningEffect(int count, float **buffers) { if (m_auditioningPluginBypassed) return; RealTimePluginInstance *plugin = m_auditioningPlugin; if (!plugin) return; - if (plugin->getAudioInputCount() != getTargetChannelCount()) { + if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) { // cerr << "plugin input count " << plugin->getAudioInputCount() // << " != our channel count " << getTargetChannelCount() // << endl; return; } - if (plugin->getAudioOutputCount() != getTargetChannelCount()) { + if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) { // cerr << "plugin output count " << plugin->getAudioOutputCount() // << " != our channel count " << getTargetChannelCount() // << endl; return; } - if (plugin->getBufferSize() < count) { + if ((int)plugin->getBufferSize() < count) { // cerr << "plugin buffer size " << plugin->getBufferSize() // 
<< " < our block size " << count // << endl; @@ -1318,16 +1313,16 @@ float **ib = plugin->getAudioInputBuffers(); float **ob = plugin->getAudioOutputBuffers(); - for (size_t c = 0; c < getTargetChannelCount(); ++c) { - for (size_t i = 0; i < count; ++i) { + for (int c = 0; c < getTargetChannelCount(); ++c) { + for (int i = 0; i < count; ++i) { ib[c][i] = buffers[c][i]; } } plugin->run(Vamp::RealTime::zeroTime, count); - for (size_t c = 0; c < getTargetChannelCount(); ++c) { - for (size_t i = 0; i < count; ++i) { + for (int c = 0; c < getTargetChannelCount(); ++c) { + for (int i = 0; i < count; ++i) { buffers[c][i] = ob[c][i]; } } @@ -1338,13 +1333,13 @@ AudioCallbackPlaySource::fillBuffers() { static float *tmp = 0; - static size_t tmpSize = 0; + static int tmpSize = 0; - size_t space = 0; - for (size_t c = 0; c < getTargetChannelCount(); ++c) { + int space = 0; + for (int c = 0; c < getTargetChannelCount(); ++c) { RingBuffer<float> *wb = getWriteRingBuffer(c); if (wb) { - size_t spaceHere = wb->getWriteSpace(); + int spaceHere = wb->getWriteSpace(); if (c == 0 || spaceHere < space) space = spaceHere; } } @@ -1356,7 +1351,7 @@ return false; } - size_t f = m_writeBufferFill; + int f = m_writeBufferFill; bool readWriteEqual = (m_readBuffers == m_writeBuffers); @@ -1377,13 +1372,13 @@ cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << endl; #endif - size_t channels = getTargetChannelCount(); + int channels = getTargetChannelCount(); - size_t orig = space; - size_t got = 0; + int orig = space; + int got = 0; static float **bufferPtrs = 0; - static size_t bufferPtrCount = 0; + static int bufferPtrCount = 0; if (bufferPtrCount < channels) { if (bufferPtrs) delete[] bufferPtrs; @@ -1391,7 +1386,7 @@ bufferPtrCount = channels; } - size_t generatorBlockSize = m_audioGenerator->getBlockSize(); + int generatorBlockSize = m_audioGenerator->getBlockSize(); if (resample && !m_converter) { static bool warned = false; @@ -1405,13 +1400,13 @@ double ratio = double(getTargetSampleRate()) / double(getSourceSampleRate()); - orig = size_t(orig / ratio + 0.1); + orig = int(orig / ratio + 0.1); // orig must be a multiple of generatorBlockSize orig = (orig / generatorBlockSize) * generatorBlockSize; if (orig == 0) return false; - size_t work = std::max(orig, space); + int work = std::max(orig, space); // We only allocate one buffer, but we use it in two halves. 
// We place the non-interleaved values in the second half of @@ -1434,21 +1429,21 @@ float *intlv = tmp; float *srcout = tmp + channels * work; - for (size_t c = 0; c < channels; ++c) { - for (size_t i = 0; i < orig; ++i) { + for (int c = 0; c < channels; ++c) { + for (int i = 0; i < orig; ++i) { nonintlv[channels * i + c] = 0.0f; } } - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { bufferPtrs[c] = nonintlv + c * orig; } got = mixModels(f, orig, bufferPtrs); // also modifies f // and interleave into first half - for (size_t c = 0; c < channels; ++c) { - for (size_t i = 0; i < got; ++i) { + for (int c = 0; c < channels; ++c) { + for (int i = 0; i < got; ++i) { float sample = nonintlv[c * got + i]; intlv[channels * i + c] = sample; } @@ -1473,7 +1468,7 @@ err = src_process(m_converter, &data); } - size_t toCopy = size_t(got * ratio + 0.1); + int toCopy = int(got * ratio + 0.1); if (err) { cerr @@ -1488,8 +1483,8 @@ #endif } - for (size_t c = 0; c < channels; ++c) { - for (size_t i = 0; i < toCopy; ++i) { + for (int c = 0; c < channels; ++c) { + for (int i = 0; i < toCopy; ++i) { tmp[i] = srcout[channels * i + c]; } RingBuffer<float> *wb = getWriteRingBuffer(c); @@ -1502,7 +1497,7 @@ } else { // space must be a multiple of generatorBlockSize - size_t reqSpace = space; + int reqSpace = space; space = (reqSpace / generatorBlockSize) * generatorBlockSize; if (space == 0) { #ifdef DEBUG_AUDIO_PLAY_SOURCE @@ -1519,22 +1514,22 @@ tmpSize = channels * space; } - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { bufferPtrs[c] = tmp + c * space; - for (size_t i = 0; i < space; ++i) { + for (int i = 0; i < space; ++i) { tmp[c * space + i] = 0.0f; } } - size_t got = mixModels(f, space, bufferPtrs); // also modifies f + int got = mixModels(f, space, bufferPtrs); // also modifies f - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { RingBuffer<float> *wb = getWriteRingBuffer(c); if (wb) { - size_t actual = wb->write(bufferPtrs[c], got); + int actual = wb->write(bufferPtrs[c], got); #ifdef DEBUG_AUDIO_PLAY_SOURCE cout << "Wrote " << actual << " samples for ch " << c << ", now " << wb->getReadSpace() << " to read" @@ -1561,22 +1556,22 @@ return true; } -size_t -AudioCallbackPlaySource::mixModels(size_t &frame, size_t count, float **buffers) +int +AudioCallbackPlaySource::mixModels(int &frame, int count, float **buffers) { - size_t processed = 0; - size_t chunkStart = frame; - size_t chunkSize = count; - size_t selectionSize = 0; - size_t nextChunkStart = chunkStart + chunkSize; + int processed = 0; + int chunkStart = frame; + int chunkSize = count; + int selectionSize = 0; + int nextChunkStart = chunkStart + chunkSize; bool looping = m_viewManager->getPlayLoopMode(); bool constrained = (m_viewManager->getPlaySelectionMode() && !m_viewManager->getSelections().empty()); static float **chunkBufferPtrs = 0; - static size_t chunkBufferPtrCount = 0; - size_t channels = getTargetChannelCount(); + static int chunkBufferPtrCount = 0; + int channels = getTargetChannelCount(); #ifdef DEBUG_AUDIO_PLAY_SOURCE cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << endl; @@ -1588,7 +1583,7 @@ chunkBufferPtrCount = channels; } - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { chunkBufferPtrs[c] = buffers[c]; } @@ -1598,11 +1593,11 @@ nextChunkStart = chunkStart + chunkSize; selectionSize = 0; - size_t fadeIn = 0, fadeOut = 0; + int fadeIn = 0, fadeOut = 0; if 
(constrained) { - size_t rChunkStart = + int rChunkStart = m_viewManager->alignPlaybackFrameToReference(chunkStart); Selection selection = @@ -1624,9 +1619,9 @@ } else { - size_t sf = m_viewManager->alignReferenceToPlaybackFrame + int sf = m_viewManager->alignReferenceToPlaybackFrame (selection.getStartFrame()); - size_t ef = m_viewManager->alignReferenceToPlaybackFrame + int ef = m_viewManager->alignReferenceToPlaybackFrame (selection.getEndFrame()); selectionSize = ef - sf; @@ -1674,8 +1669,6 @@ cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl; #endif - size_t got = 0; - if (selectionSize < 100) { fadeIn = 0; fadeOut = 0; @@ -1699,12 +1692,12 @@ for (std::set<Model *>::iterator mi = m_models.begin(); mi != m_models.end(); ++mi) { - got = m_audioGenerator->mixModel(*mi, chunkStart, - chunkSize, chunkBufferPtrs, - fadeIn, fadeOut); + (void) m_audioGenerator->mixModel(*mi, chunkStart, + chunkSize, chunkBufferPtrs, + fadeIn, fadeOut); } - for (size_t c = 0; c < channels; ++c) { + for (int c = 0; c < channels; ++c) { chunkBufferPtrs[c] += chunkSize; } @@ -1726,7 +1719,7 @@ if (m_readBuffers == m_writeBuffers) return; // only unify if there will be something to read - for (size_t c = 0; c < getTargetChannelCount(); ++c) { + for (int c = 0; c < getTargetChannelCount(); ++c) { RingBuffer<float> *wb = getWriteRingBuffer(c); if (wb) { if (wb->getReadSpace() < m_blockSize * 2) { @@ -1744,10 +1737,10 @@ } } - size_t rf = m_readBufferFill; + int rf = m_readBufferFill; RingBuffer<float> *rb = getReadRingBuffer(0); if (rb) { - size_t rs = rb->getReadSpace(); + int rs = rb->getReadSpace(); //!!! incorrect when in non-contiguous selection, see comments elsewhere // cout << "rs = " << rs << endl; if (rs < rf) rf -= rs; @@ -1758,14 +1751,14 @@ SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl; #endif - size_t wf = m_writeBufferFill; - size_t skip = 0; - for (size_t c = 0; c < getTargetChannelCount(); ++c) { + int wf = m_writeBufferFill; + int skip = 0; + for (int c = 0; c < getTargetChannelCount(); ++c) { RingBuffer<float> *wb = getWriteRingBuffer(c); if (wb) { if (c == 0) { - size_t wrs = wb->getReadSpace(); + int wrs = wb->getReadSpace(); // cout << "wrs = " << wrs << endl; if (wrs < wf) wf -= wrs; @@ -1833,7 +1826,7 @@ cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << endl; #endif - s.m_condition.wait(&s.m_mutex, size_t(ms)); + s.m_condition.wait(&s.m_mutex, int(ms)); } #ifdef DEBUG_AUDIO_PLAY_SOURCE @@ -1855,7 +1848,7 @@ #ifdef DEBUG_AUDIO_PLAY_SOURCE cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl; #endif - for (size_t c = 0; c < s.getTargetChannelCount(); ++c) { + for (int c = 0; c < s.getTargetChannelCount(); ++c) { RingBuffer<float> *rb = s.getReadRingBuffer(c); if (rb) rb->reset(); }
--- a/audioio/AudioCallbackPlaySource.h Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioCallbackPlaySource.h Tue Jun 17 16:23:06 2014 +0100 @@ -83,7 +83,7 @@ * from the given frame. If playback is already under way, reseek * to the given frame and continue. */ - virtual void play(size_t startFrame); + virtual void play(int startFrame); /** * Stop playback and ensure that no more data is returned. @@ -99,24 +99,24 @@ * Return the frame number that is currently expected to be coming * out of the speakers. (i.e. compensating for playback latency.) */ - virtual size_t getCurrentPlayingFrame(); + virtual int getCurrentPlayingFrame(); /** * Return the last frame that would come out of the speakers if we * stopped playback right now. */ - virtual size_t getCurrentBufferedFrame(); + virtual int getCurrentBufferedFrame(); /** * Return the frame at which playback is expected to end (if not looping). */ - virtual size_t getPlayEndFrame() { return m_lastModelEndFrame; } + virtual int getPlayEndFrame() { return m_lastModelEndFrame; } /** * Set the target and the block size of the target audio device. * This should be called by the target class. */ - void setTarget(AudioCallbackPlayTarget *, size_t blockSize); + void setTarget(AudioCallbackPlayTarget *, int blockSize); /** * Get the block size of the target audio device. This may be an @@ -124,7 +124,7 @@ * size; the source should behave itself even if this value turns * out to be inaccurate. */ - size_t getTargetBlockSize() const; + int getTargetBlockSize() const; /** * Set the playback latency of the target audio device, in frames @@ -133,12 +133,12 @@ * highest last frame across all channels) requested via * getSamples(). The default is zero. */ - void setTargetPlayLatency(size_t); + void setTargetPlayLatency(int); /** * Get the playback latency of the target audio device. */ - size_t getTargetPlayLatency() const; + int getTargetPlayLatency() const; /** * Specify that the target audio device has a fixed sample rate @@ -147,13 +147,13 @@ * source sample rate, this class will resample automatically to * fit. */ - void setTargetSampleRate(size_t); + void setTargetSampleRate(int); /** * Return the sample rate set by the target audio device (or the * source sample rate if the target hasn't set one). */ - virtual size_t getTargetSampleRate() const; + virtual int getTargetSampleRate() const; /** * Set the current output levels for metering (for call from the @@ -172,7 +172,7 @@ * This may safely be called from a realtime thread. Returns 0 if * there is no source yet available. */ - size_t getSourceChannelCount() const; + int getSourceChannelCount() const; /** * Get the number of channels of audio that will be provided @@ -182,21 +182,21 @@ * This may safely be called from a realtime thread. Returns 0 if * there is no source yet available. */ - size_t getTargetChannelCount() const; + int getTargetChannelCount() const; /** * Get the actual sample rate of the source material. This may * safely be called from a realtime thread. Returns 0 if there is * no source yet available. */ - virtual size_t getSourceSampleRate() const; + virtual int getSourceSampleRate() const; /** * Get "count" samples (at the target sample rate) of the mixed * audio data, in all channels. This may safely be called from a * realtime thread. */ - size_t getSourceSamples(size_t count, float **buffer); + int getSourceSamples(int count, float **buffer); /** * Set the time stretcher factor (i.e. playback speed). 
@@ -244,7 +244,7 @@ void playStatusChanged(bool isPlaying); - void sampleRateMismatch(size_t requested, size_t available, bool willResample); + void sampleRateMismatch(int requested, int available, bool willResample); void audioOverloadPluginDisabled(); void audioTimeStretchMultiChannelDisabled(); @@ -260,7 +260,7 @@ void playSelectionModeChanged(); void playParametersChanged(PlayParameters *); void preferenceChanged(PropertyContainer::PropertyName); - void modelChanged(size_t startFrame, size_t endFrame); + void modelChanged(int startFrame, int endFrame); protected: ViewManagerBase *m_viewManager; @@ -280,50 +280,50 @@ std::set<Model *> m_models; RingBufferVector *m_readBuffers; RingBufferVector *m_writeBuffers; - size_t m_readBufferFill; - size_t m_writeBufferFill; + int m_readBufferFill; + int m_writeBufferFill; Scavenger<RingBufferVector> m_bufferScavenger; - size_t m_sourceChannelCount; - size_t m_blockSize; - size_t m_sourceSampleRate; - size_t m_targetSampleRate; - size_t m_playLatency; + int m_sourceChannelCount; + int m_blockSize; + int m_sourceSampleRate; + int m_targetSampleRate; + int m_playLatency; AudioCallbackPlayTarget *m_target; double m_lastRetrievalTimestamp; - size_t m_lastRetrievedBlockSize; + int m_lastRetrievedBlockSize; bool m_trustworthyTimestamps; - size_t m_lastCurrentFrame; + int m_lastCurrentFrame; bool m_playing; bool m_exiting; - size_t m_lastModelEndFrame; - size_t m_ringBufferSize; + int m_lastModelEndFrame; + int m_ringBufferSize; float m_outputLeft; float m_outputRight; RealTimePluginInstance *m_auditioningPlugin; bool m_auditioningPluginBypassed; Scavenger<RealTimePluginInstance> m_pluginScavenger; - size_t m_playStartFrame; + int m_playStartFrame; bool m_playStartFramePassed; RealTime m_playStartedAt; - RingBuffer<float> *getWriteRingBuffer(size_t c) { - if (m_writeBuffers && c < m_writeBuffers->size()) { + RingBuffer<float> *getWriteRingBuffer(int c) { + if (m_writeBuffers && c < (int)m_writeBuffers->size()) { return (*m_writeBuffers)[c]; } else { return 0; } } - RingBuffer<float> *getReadRingBuffer(size_t c) { + RingBuffer<float> *getReadRingBuffer(int c) { RingBufferVector *rb = m_readBuffers; - if (rb && c < rb->size()) { + if (rb && c < (int)rb->size()) { return (*rb)[c]; } else { return 0; } } - void clearRingBuffers(bool haveLock = false, size_t count = 0); + void clearRingBuffers(bool haveLock = false, int count = 0); void unifyRingBuffers(); RubberBand::RubberBandStretcher *m_timeStretcher; @@ -331,9 +331,9 @@ float m_stretchRatio; bool m_stretchMono; - size_t m_stretcherInputCount; + int m_stretcherInputCount; float **m_stretcherInputs; - size_t *m_stretcherInputSizes; + int *m_stretcherInputSizes; // Called from fill thread, m_playing true, mutex held // Return true if work done @@ -343,17 +343,17 @@ // which will be count or fewer. Return in the frame argument the // new buffered frame position (which may be earlier than the // frame argument passed in, in the case of looping). - size_t mixModels(size_t &frame, size_t count, float **buffers); + int mixModels(int &frame, int count, float **buffers); // Called from getSourceSamples. - void applyAuditioningEffect(size_t count, float **buffers); + void applyAuditioningEffect(int count, float **buffers); // Ranges of current selections, if play selection is active std::vector<RealTime> m_rangeStarts; std::vector<RealTime> m_rangeDurations; void rebuildRangeLists(); - size_t getCurrentFrame(RealTime outputLatency); + int getCurrentFrame(RealTime outputLatency); class FillThread : public Thread {
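Note that in AudioCallbackPlaySource the `connect`/`disconnect` calls change together with the signal and slot declarations: Qt's old string-based connections are matched by the normalised signature inside `SIGNAL()`/`SLOT()`, so once the declarations use `int` the macro strings must say `int` too or the connection fails at runtime. A minimal sketch of that constraint, using hypothetical classes rather than the real ones:

```cpp
#include <QObject>

// Hypothetical sender/receiver, only to show signature matching.
class Sender : public QObject {
    Q_OBJECT
signals:
    void modelChanged(int startFrame, int endFrame);
};

class Receiver : public QObject {
    Q_OBJECT
public slots:
    void modelChanged(int, int) { /* ... */ }
};

void wire(Sender *s, Receiver *r)
{
    // SIGNAL(modelChanged(size_t, size_t)) would no longer resolve here;
    // the strings must match the declared parameter types.
    QObject::connect(s, SIGNAL(modelChanged(int, int)),
                     r, SLOT(modelChanged(int, int)));
}
```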
--- a/audioio/AudioGenerator.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioGenerator.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -37,7 +37,7 @@ #include <QDir> #include <QFile> -const size_t +const int AudioGenerator::m_processingBlockSize = 1024; QString @@ -291,7 +291,7 @@ } void -AudioGenerator::setTargetChannelCount(size_t targetChannelCount) +AudioGenerator::setTargetChannelCount(int targetChannelCount) { if (m_targetChannelCount == targetChannelCount) return; @@ -305,7 +305,7 @@ } } -size_t +int AudioGenerator::getBlockSize() const { return m_processingBlockSize; @@ -329,9 +329,9 @@ m_soloing = false; } -size_t -AudioGenerator::mixModel(Model *model, size_t startFrame, size_t frameCount, - float **buffer, size_t fadeIn, size_t fadeOut) +int +AudioGenerator::mixModel(Model *model, int startFrame, int frameCount, + float **buffer, int fadeIn, int fadeOut) { if (m_sourceSampleRate == 0) { cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl; @@ -388,30 +388,30 @@ return frameCount; } -size_t +int AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm, - size_t startFrame, size_t frames, + int startFrame, int frames, float **buffer, float gain, float pan, - size_t fadeIn, size_t fadeOut) + int fadeIn, int fadeOut) { static float **channelBuffer = 0; - static size_t channelBufSiz = 0; - static size_t channelBufCount = 0; + static int channelBufSiz = 0; + static int channelBufCount = 0; - size_t totalFrames = frames + fadeIn/2 + fadeOut/2; + int totalFrames = frames + fadeIn/2 + fadeOut/2; - size_t modelChannels = dtvm->getChannelCount(); + int modelChannels = dtvm->getChannelCount(); if (channelBufSiz < totalFrames || channelBufCount < modelChannels) { - for (size_t c = 0; c < channelBufCount; ++c) { + for (int c = 0; c < channelBufCount; ++c) { delete[] channelBuffer[c]; } delete[] channelBuffer; channelBuffer = new float *[modelChannels]; - for (size_t c = 0; c < modelChannels; ++c) { + for (int c = 0; c < modelChannels; ++c) { channelBuffer[c] = new float[totalFrames]; } @@ -419,7 +419,7 @@ channelBufSiz = totalFrames; } - size_t got = 0; + int got = 0; if (startFrame >= fadeIn/2) { got = dtvm->getData(0, modelChannels - 1, @@ -427,9 +427,9 @@ frames + fadeOut/2 + fadeIn/2, channelBuffer); } else { - size_t missing = fadeIn/2 - startFrame; + int missing = fadeIn/2 - startFrame; - for (size_t c = 0; c < modelChannels; ++c) { + for (int c = 0; c < modelChannels; ++c) { channelBuffer[c] += missing; } @@ -438,16 +438,16 @@ frames + fadeOut/2, channelBuffer); - for (size_t c = 0; c < modelChannels; ++c) { + for (int c = 0; c < modelChannels; ++c) { channelBuffer[c] -= missing; } got += missing; } - for (size_t c = 0; c < m_targetChannelCount; ++c) { + for (int c = 0; c < m_targetChannelCount; ++c) { - size_t sourceChannel = (c % modelChannels); + int sourceChannel = (c % modelChannels); // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl; @@ -460,13 +460,13 @@ } } - for (size_t i = 0; i < fadeIn/2; ++i) { + for (int i = 0; i < fadeIn/2; ++i) { float *back = buffer[c]; back -= fadeIn/2; back[i] += (channelGain * channelBuffer[sourceChannel][i] * i) / fadeIn; } - for (size_t i = 0; i < frames + fadeOut/2; ++i) { + for (int i = 0; i < frames + fadeOut/2; ++i) { float mult = channelGain; if (i < fadeIn/2) { mult = (mult * i) / fadeIn; @@ -483,15 +483,15 @@ return got; } -size_t +int AudioGenerator::mixClipModel(Model *model, - size_t startFrame, size_t frames, + int startFrame, int frames, float **buffer, 
float gain, float pan) { ClipMixer *clipMixer = m_clipMixerMap[model]; if (!clipMixer) return 0; - size_t blocks = frames / m_processingBlockSize; + int blocks = frames / m_processingBlockSize; //!!! todo: the below -- it matters @@ -505,7 +505,7 @@ //callback play source has to use that as a multiple for all the //calls to mixModel - size_t got = blocks * m_processingBlockSize; + int got = blocks * m_processingBlockSize; #ifdef DEBUG_AUDIO_GENERATOR cout << "mixModel [clip]: frames " << frames @@ -519,15 +519,15 @@ float **bufferIndexes = new float *[m_targetChannelCount]; - for (size_t i = 0; i < blocks; ++i) { + for (int i = 0; i < blocks; ++i) { - size_t reqStart = startFrame + i * m_processingBlockSize; + int reqStart = startFrame + i * m_processingBlockSize; NoteList notes; NoteExportable *exportable = dynamic_cast<NoteExportable *>(model); if (exportable) { - notes = exportable->getNotes(reqStart, - reqStart + m_processingBlockSize); + notes = exportable->getNotesWithin(reqStart, + reqStart + m_processingBlockSize); } std::vector<ClipMixer::NoteStart> starts; @@ -536,7 +536,7 @@ for (NoteList::const_iterator ni = notes.begin(); ni != notes.end(); ++ni) { - size_t noteFrame = ni->start; + int noteFrame = ni->start; if (noteFrame < reqStart || noteFrame >= reqStart + m_processingBlockSize) continue; @@ -544,7 +544,7 @@ while (noteOffs.begin() != noteOffs.end() && noteOffs.begin()->frame <= noteFrame) { - size_t eventFrame = noteOffs.begin()->frame; + int eventFrame = noteOffs.begin()->frame; if (eventFrame < reqStart) eventFrame = reqStart; off.frameOffset = eventFrame - reqStart; @@ -575,7 +575,7 @@ while (noteOffs.begin() != noteOffs.end() && noteOffs.begin()->frame <= reqStart + m_processingBlockSize) { - size_t eventFrame = noteOffs.begin()->frame; + int eventFrame = noteOffs.begin()->frame; if (eventFrame < reqStart) eventFrame = reqStart; off.frameOffset = eventFrame - reqStart; @@ -589,7 +589,7 @@ noteOffs.erase(noteOffs.begin()); } - for (size_t c = 0; c < m_targetChannelCount; ++c) { + for (int c = 0; c < m_targetChannelCount; ++c) { bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; } @@ -601,10 +601,10 @@ return got; } -size_t +int AudioGenerator::mixContinuousSynthModel(Model *model, - size_t startFrame, - size_t frames, + int startFrame, + int frames, float **buffer, float gain, float pan) @@ -616,11 +616,11 @@ SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model); if (stvm->getScaleUnits() != "Hz") return 0; - size_t blocks = frames / m_processingBlockSize; + int blocks = frames / m_processingBlockSize; //!!! todo: see comment in mixClipModel - size_t got = blocks * m_processingBlockSize; + int got = blocks * m_processingBlockSize; #ifdef DEBUG_AUDIO_GENERATOR cout << "mixModel [synth]: frames " << frames @@ -629,11 +629,11 @@ float **bufferIndexes = new float *[m_targetChannelCount]; - for (size_t i = 0; i < blocks; ++i) { + for (int i = 0; i < blocks; ++i) { - size_t reqStart = startFrame + i * m_processingBlockSize; + int reqStart = startFrame + i * m_processingBlockSize; - for (size_t c = 0; c < m_targetChannelCount; ++c) { + for (int c = 0; c < m_targetChannelCount; ++c) { bufferIndexes[c] = buffer[c] + i * m_processingBlockSize; }
--- a/audioio/AudioGenerator.h Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioGenerator.h Tue Jun 17 16:23:06 2014 +0100 @@ -67,20 +67,20 @@ * Set the target channel count. The buffer parameter to mixModel * must always point to at least this number of arrays. */ - virtual void setTargetChannelCount(size_t channelCount); + virtual void setTargetChannelCount(int channelCount); /** * Return the internal processing block size. The frameCount * argument to all mixModel calls must be a multiple of this * value. */ - virtual size_t getBlockSize() const; + virtual int getBlockSize() const; /** * Mix a single model into an output buffer. */ - virtual size_t mixModel(Model *model, size_t startFrame, size_t frameCount, - float **buffer, size_t fadeIn = 0, size_t fadeOut = 0); + virtual int mixModel(Model *model, int startFrame, int frameCount, + float **buffer, int fadeIn = 0, int fadeOut = 0); /** * Specify that only the given set of models should be played. @@ -97,19 +97,19 @@ void playClipIdChanged(const Playable *, QString); protected: - size_t m_sourceSampleRate; - size_t m_targetChannelCount; - size_t m_waveType; + int m_sourceSampleRate; + int m_targetChannelCount; + int m_waveType; bool m_soloing; std::set<Model *> m_soloModelSet; struct NoteOff { - NoteOff(float _freq, size_t _frame) : frequency(_freq), frame(_frame) { } + NoteOff(float _freq, int _frame) : frequency(_freq), frame(_frame) { } float frequency; - size_t frame; + int frame; struct Comparator { bool operator()(const NoteOff &n1, const NoteOff &n2) const { @@ -143,19 +143,19 @@ static void initialiseSampleDir(); - virtual size_t mixDenseTimeValueModel - (DenseTimeValueModel *model, size_t startFrame, size_t frameCount, - float **buffer, float gain, float pan, size_t fadeIn, size_t fadeOut); + virtual int mixDenseTimeValueModel + (DenseTimeValueModel *model, int startFrame, int frameCount, + float **buffer, float gain, float pan, int fadeIn, int fadeOut); - virtual size_t mixClipModel - (Model *model, size_t startFrame, size_t frameCount, + virtual int mixClipModel + (Model *model, int startFrame, int frameCount, float **buffer, float gain, float pan); - virtual size_t mixContinuousSynthModel - (Model *model, size_t startFrame, size_t frameCount, + virtual int mixContinuousSynthModel + (Model *model, int startFrame, int frameCount, float **buffer, float gain, float pan); - static const size_t m_processingBlockSize; + static const int m_processingBlockSize; }; #endif
--- a/audioio/AudioJACKTarget.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioJACKTarget.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -171,6 +171,20 @@ return f(client, port); } +static void dynamic_jack_port_get_latency_range(jack_port_t *port, + jack_latency_callback_mode_t mode, + jack_latency_range_t *range) +{ + typedef void (*func)(jack_port_t *, jack_latency_callback_mode_t, jack_latency_range_t *); + void *s = symbol("jack_port_get_latency_range"); + if (!s) { + range.min = range.max = 0; + return; + } + func f = (func)s; + f(port, mode, range); +} + #define dynamic1(rv, name, argtype, failval) \ static rv dynamic_##name(argtype arg) { \ typedef rv (*func) (argtype); \ @@ -187,7 +201,6 @@ dynamic1(int, jack_deactivate, jack_client_t *, 1); dynamic1(int, jack_client_close, jack_client_t *, 1); dynamic1(jack_nframes_t, jack_frame_time, jack_client_t *, 0); -dynamic1(jack_nframes_t, jack_port_get_latency, jack_port_t *, 0); dynamic1(const char *, jack_port_name, const jack_port_t *, 0); #define jack_client_new dynamic_jack_client_new @@ -203,7 +216,6 @@ #define jack_get_ports dynamic_jack_get_ports #define jack_port_register dynamic_jack_port_register #define jack_port_unregister dynamic_jack_port_unregister -#define jack_port_get_latency dynamic_jack_port_get_latency #define jack_port_name dynamic_jack_port_name #define jack_connect dynamic_jack_connect #define jack_port_get_buffer dynamic_jack_port_get_buffer @@ -334,12 +346,12 @@ m_source->setTarget(this, m_bufferSize); m_source->setTargetSampleRate(m_sampleRate); - size_t channels = m_source->getSourceChannelCount(); + int channels = m_source->getSourceChannelCount(); // Because we offer pan, we always want at least 2 channels if (channels < 2) channels = 2; - if (channels == m_outputs.size() || !m_client) { + if (channels == (int)m_outputs.size() || !m_client) { m_mutex.unlock(); return; } @@ -347,14 +359,14 @@ const char **ports = jack_get_ports(m_client, NULL, NULL, JackPortIsPhysical | JackPortIsInput); - size_t physicalPortCount = 0; + int physicalPortCount = 0; while (ports[physicalPortCount]) ++physicalPortCount; #ifdef DEBUG_AUDIO_JACK_TARGET SVDEBUG << "AudioJACKTarget::sourceModelReplaced: have " << channels << " channels and " << physicalPortCount << " physical ports" << endl; #endif - while (m_outputs.size() < channels) { + while ((int)m_outputs.size() < channels) { char name[20]; jack_port_t *port; @@ -372,17 +384,20 @@ << "ERROR: AudioJACKTarget: Failed to create JACK output port " << m_outputs.size() << endl; } else { - m_source->setTargetPlayLatency(jack_port_get_latency(port)); + jack_latency_range_t range; + jack_port_get_latency_range(port, JackPlaybackLatency, &range); + m_source->setTargetPlayLatency(range.max); + cerr << "AudioJACKTarget: output latency is " << range.max << endl; } - if (m_outputs.size() < physicalPortCount) { + if ((int)m_outputs.size() < physicalPortCount) { jack_connect(m_client, jack_port_name(port), ports[m_outputs.size()]); } m_outputs.push_back(port); } - while (m_outputs.size() > channels) { + while ((int)m_outputs.size() > channels) { std::vector<jack_port_t *>::iterator itr = m_outputs.end(); --itr; jack_port_t *port = *itr; @@ -419,29 +434,29 @@ float **buffers = (float **)alloca(m_outputs.size() * sizeof(float *)); - for (size_t ch = 0; ch < m_outputs.size(); ++ch) { + for (int ch = 0; ch < (int)m_outputs.size(); ++ch) { buffers[ch] = (float *)jack_port_get_buffer(m_outputs[ch], nframes); } - size_t received = 0; + int received = 0; if (m_source) { received = 
m_source->getSourceSamples(nframes, buffers); } - for (size_t ch = 0; ch < m_outputs.size(); ++ch) { - for (size_t i = received; i < nframes; ++i) { + for (int ch = 0; ch < (int)m_outputs.size(); ++ch) { + for (int i = received; i < (int)nframes; ++i) { buffers[ch][i] = 0.0; } } float peakLeft = 0.0, peakRight = 0.0; - for (size_t ch = 0; ch < m_outputs.size(); ++ch) { + for (int ch = 0; ch < (int)m_outputs.size(); ++ch) { float peak = 0.0; - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < (int)nframes; ++i) { buffers[ch][i] *= m_outputGain; float sample = fabsf(buffers[ch][i]); if (sample > peak) peak = sample;
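The JACK diff also swaps the deprecated `jack_port_get_latency()` for the latency-range API and reports the worst case to the play source. A minimal sketch of that call pattern (hypothetical helper name, same JACK functions as the diff):

```cpp
#include <jack/jack.h>

// Hypothetical helper: worst-case playback latency of an output port,
// using jack_port_get_latency_range() as the diff does.
static jack_nframes_t playbackLatencyFrames(jack_port_t *port)
{
    jack_latency_range_t range;
    range.min = range.max = 0;
    jack_port_get_latency_range(port, JackPlaybackLatency, &range);
    return range.max; // the diff passes range.max to setTargetPlayLatency()
}
```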
--- a/audioio/AudioPortAudioTarget.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioPortAudioTarget.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -218,10 +218,10 @@ assert(nframes <= m_bufferSize); static float **tmpbuf = 0; - static size_t tmpbufch = 0; - static size_t tmpbufsz = 0; + static int tmpbufch = 0; + static int tmpbufsz = 0; - size_t sourceChannels = m_source->getSourceChannelCount(); + int sourceChannels = m_source->getSourceChannelCount(); // Because we offer pan, we always want at least 2 channels if (sourceChannels < 2) sourceChannels = 2; @@ -229,7 +229,7 @@ if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < m_bufferSize) { if (tmpbuf) { - for (size_t i = 0; i < tmpbufch; ++i) { + for (int i = 0; i < tmpbufch; ++i) { delete[] tmpbuf[i]; } delete[] tmpbuf; @@ -239,23 +239,23 @@ tmpbufsz = m_bufferSize; tmpbuf = new float *[tmpbufch]; - for (size_t i = 0; i < tmpbufch; ++i) { + for (int i = 0; i < tmpbufch; ++i) { tmpbuf[i] = new float[tmpbufsz]; } } - size_t received = m_source->getSourceSamples(nframes, tmpbuf); + int received = m_source->getSourceSamples(nframes, tmpbuf); float peakLeft = 0.0, peakRight = 0.0; - for (size_t ch = 0; ch < 2; ++ch) { + for (int ch = 0; ch < 2; ++ch) { float peak = 0.0; if (ch < sourceChannels) { // PortAudio samples are interleaved - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { if (i < received) { output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain; float sample = fabsf(output[i * 2 + ch]); @@ -267,7 +267,7 @@ } else if (ch == 1 && sourceChannels == 1) { - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { if (i < received) { output[i * 2 + ch] = tmpbuf[0][i] * m_outputGain; float sample = fabsf(output[i * 2 + ch]); @@ -278,7 +278,7 @@ } } else { - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { output[i * 2 + ch] = 0; } }
--- a/audioio/AudioPulseAudioTarget.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioPulseAudioTarget.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -163,7 +163,7 @@ } void -AudioPulseAudioTarget::streamWrite(size_t requested) +AudioPulseAudioTarget::streamWrite(int requested) { #ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY cout << "AudioPulseAudioTarget::streamWrite(" << requested << ")" << endl; @@ -181,15 +181,15 @@ static float *output = 0; static float **tmpbuf = 0; - static size_t tmpbufch = 0; - static size_t tmpbufsz = 0; + static int tmpbufch = 0; + static int tmpbufsz = 0; - size_t sourceChannels = m_source->getSourceChannelCount(); + int sourceChannels = m_source->getSourceChannelCount(); // Because we offer pan, we always want at least 2 channels if (sourceChannels < 2) sourceChannels = 2; - size_t nframes = requested / (sourceChannels * sizeof(float)); + int nframes = requested / (sourceChannels * sizeof(float)); if (nframes > m_bufferSize) { cerr << "WARNING: AudioPulseAudioTarget::streamWrite: nframes " << nframes << " > m_bufferSize " << m_bufferSize << endl; @@ -202,7 +202,7 @@ if (!tmpbuf || tmpbufch != sourceChannels || int(tmpbufsz) < nframes) { if (tmpbuf) { - for (size_t i = 0; i < tmpbufch; ++i) { + for (int i = 0; i < tmpbufch; ++i) { delete[] tmpbuf[i]; } delete[] tmpbuf; @@ -216,14 +216,14 @@ tmpbufsz = nframes; tmpbuf = new float *[tmpbufch]; - for (size_t i = 0; i < tmpbufch; ++i) { + for (int i = 0; i < tmpbufch; ++i) { tmpbuf[i] = new float[tmpbufsz]; } output = new float[tmpbufsz * tmpbufch]; } - size_t received = m_source->getSourceSamples(nframes, tmpbuf); + int received = m_source->getSourceSamples(nframes, tmpbuf); #ifdef DEBUG_AUDIO_PULSE_AUDIO_TARGET_PLAY cerr << "requested " << nframes << ", received " << received << endl; @@ -235,14 +235,14 @@ float peakLeft = 0.0, peakRight = 0.0; - for (size_t ch = 0; ch < 2; ++ch) { + for (int ch = 0; ch < 2; ++ch) { float peak = 0.0; if (ch < sourceChannels) { // PulseAudio samples are interleaved - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { if (i < received) { output[i * 2 + ch] = tmpbuf[ch][i] * m_outputGain; float sample = fabsf(output[i * 2 + ch]); @@ -254,7 +254,7 @@ } else if (ch == 1 && sourceChannels == 1) { - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { if (i < received) { output[i * 2 + ch] = tmpbuf[0][i] * m_outputGain; float sample = fabsf(output[i * 2 + ch]); @@ -265,7 +265,7 @@ } } else { - for (size_t i = 0; i < nframes; ++i) { + for (int i = 0; i < nframes; ++i) { output[i * 2 + ch] = 0; } } @@ -308,47 +308,48 @@ switch (pa_stream_get_state(m_stream)) { - case PA_STREAM_CREATING: - case PA_STREAM_TERMINATED: - break; + case PA_STREAM_UNCONNECTED: + case PA_STREAM_CREATING: + case PA_STREAM_TERMINATED: + break; - case PA_STREAM_READY: - { - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Ready" << endl; + case PA_STREAM_READY: + { + SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Ready" << endl; + + pa_usec_t latency = 0; + int negative = 0; + if (pa_stream_get_latency(m_stream, &latency, &negative)) { + cerr << "AudioPulseAudioTarget::streamStateChanged: Failed to query latency" << endl; + } + cerr << "Latency = " << latency << " usec" << endl; + int latframes = (latency / 1000000.f) * float(m_sampleRate); + cerr << "that's " << latframes << " frames" << endl; - pa_usec_t latency = 0; - int negative = 0; - if (pa_stream_get_latency(m_stream, &latency, &negative)) { - cerr << "AudioPulseAudioTarget::streamStateChanged: Failed 
to query latency" << endl; - } - cerr << "Latency = " << latency << " usec" << endl; - int latframes = (latency / 1000000.f) * float(m_sampleRate); - cerr << "that's " << latframes << " frames" << endl; - - const pa_buffer_attr *attr; - if (!(attr = pa_stream_get_buffer_attr(m_stream))) { - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Cannot query stream buffer attributes" << endl; - m_source->setTarget(this, m_bufferSize); - m_source->setTargetSampleRate(m_sampleRate); - if (latframes != 0) m_source->setTargetPlayLatency(latframes); - } else { - int targetLength = attr->tlength; - SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: stream target length = " << targetLength << endl; - m_source->setTarget(this, targetLength); - m_source->setTargetSampleRate(m_sampleRate); - if (latframes == 0) latframes = targetLength; - cerr << "latency = " << latframes << endl; - m_source->setTargetPlayLatency(latframes); - } + const pa_buffer_attr *attr; + if (!(attr = pa_stream_get_buffer_attr(m_stream))) { + SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: Cannot query stream buffer attributes" << endl; + m_source->setTarget(this, m_bufferSize); + m_source->setTargetSampleRate(m_sampleRate); + if (latframes != 0) m_source->setTargetPlayLatency(latframes); + } else { + int targetLength = attr->tlength; + SVDEBUG << "AudioPulseAudioTarget::streamStateChanged: stream target length = " << targetLength << endl; + m_source->setTarget(this, targetLength); + m_source->setTargetSampleRate(m_sampleRate); + if (latframes == 0) latframes = targetLength; + cerr << "latency = " << latframes << endl; + m_source->setTargetPlayLatency(latframes); } - break; - - case PA_STREAM_FAILED: - default: - cerr << "AudioPulseAudioTarget::streamStateChanged: Error: " - << pa_strerror(pa_context_errno(m_context)) << endl; - //!!! do something... - break; + } + break; + + case PA_STREAM_FAILED: + default: + cerr << "AudioPulseAudioTarget::streamStateChanged: Error: " + << pa_strerror(pa_context_errno(m_context)) << endl; + //!!! do something... + break; } } @@ -373,6 +374,7 @@ switch (pa_context_get_state(m_context)) { + case PA_CONTEXT_UNCONNECTED: case PA_CONTEXT_CONNECTING: case PA_CONTEXT_AUTHORIZING: case PA_CONTEXT_SETTING_NAME:
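The PulseAudio changes are largely re-indentation, plus handling the previously unlisted `PA_STREAM_UNCONNECTED` and `PA_CONTEXT_UNCONNECTED` enumerators; listing every enumerator is what keeps switch-coverage warnings (`-Wswitch` when there is no `default`, `-Wswitch-enum` even with one) quiet, which is presumably the intent here. A tiny sketch with a made-up enum, not the PulseAudio types:

```cpp
// Made-up enum; PulseAudio's pa_stream_state_t works the same way.
enum class StreamState { Unconnected, Creating, Ready, Failed, Terminated };

const char *describe(StreamState s)
{
    switch (s) {
    // Omitting Unconnected (with no default) would draw
    // "enumeration value 'Unconnected' not handled in switch".
    case StreamState::Unconnected: return "unconnected";
    case StreamState::Creating:    return "creating";
    case StreamState::Ready:       return "ready";
    case StreamState::Failed:      return "failed";
    case StreamState::Terminated:  return "terminated";
    }
    return "unknown";
}
```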
--- a/audioio/AudioPulseAudioTarget.h Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/AudioPulseAudioTarget.h Tue Jun 17 16:23:06 2014 +0100 @@ -46,7 +46,7 @@ virtual void sourceModelReplaced(); protected: - void streamWrite(size_t); + void streamWrite(int); void streamStateChanged(); void contextStateChanged();
--- a/audioio/ClipMixer.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/audioio/ClipMixer.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -49,9 +49,8 @@ SF_INFO info; SNDFILE *file; - int sampleCount = 0; float *tmpFrames; - size_t i; + int i; info.format = 0; file = sf_open(path.toLocal8Bit().data(), SFM_READ, &info);
--- a/framework/Document.cpp Tue Jun 03 11:03:09 2014 +0100 +++ b/framework/Document.cpp Tue Jun 17 16:23:06 2014 +0100 @@ -235,7 +235,7 @@ } QStringList names; - for (int i = 0; i < newModels.size(); ++i) { + for (int i = 0; i < (int)newModels.size(); ++i) { names.push_back(getUniqueLayerName (TransformFactory::getInstance()-> getTransformFriendlyName @@ -305,7 +305,7 @@ (transforms, input, message, amc); QStringList names; - for (int i = 0; i < newModels.size(); ++i) { + for (int i = 0; i < (int)newModels.size(); ++i) { names.push_back(getUniqueLayerName (TransformFactory::getInstance()-> getTransformFriendlyName @@ -493,10 +493,9 @@ << typeid(*replacementModel).name() << ") in layer " << layer << " (name " << layer->objectName() << ")" << endl; -#endif + RangeSummarisableTimeValueModel *rm = dynamic_cast<RangeSummarisableTimeValueModel *>(replacementModel); -#ifdef DEBUG_DOCUMENT if (rm) { cerr << "new model has " << rm->getChannelCount() << " channels " << endl; } else {
--- a/framework/MainWindowBase.cpp Tue Jun 03 11:03:09 2014 +0100
+++ b/framework/MainWindowBase.cpp Tue Jun 17 16:23:06 2014 +0100
@@ -213,8 +213,8 @@
     m_playSource = new AudioCallbackPlaySource(m_viewManager,
                                                QApplication::applicationName());
 
-    connect(m_playSource, SIGNAL(sampleRateMismatch(size_t, size_t, bool)),
-            this, SLOT(sampleRateMismatch(size_t, size_t, bool)));
+    connect(m_playSource, SIGNAL(sampleRateMismatch(int, int, bool)),
+            this, SLOT(sampleRateMismatch(int, int, bool)));
     connect(m_playSource, SIGNAL(audioOverloadPluginDisabled()),
             this, SLOT(audioOverloadPluginDisabled()));
     connect(m_playSource, SIGNAL(audioTimeStretchMultiChannelDisabled()),
@@ -223,17 +223,17 @@
     connect(m_viewManager, SIGNAL(outputLevelsChanged(float, float)),
             this, SLOT(outputLevelsChanged(float, float)));
 
-    connect(m_viewManager, SIGNAL(playbackFrameChanged(unsigned long)),
-            this, SLOT(playbackFrameChanged(unsigned long)));
-
-    connect(m_viewManager, SIGNAL(globalCentreFrameChanged(unsigned long)),
-            this, SLOT(globalCentreFrameChanged(unsigned long)));
-
-    connect(m_viewManager, SIGNAL(viewCentreFrameChanged(View *, unsigned long)),
-            this, SLOT(viewCentreFrameChanged(View *, unsigned long)));
-
-    connect(m_viewManager, SIGNAL(viewZoomLevelChanged(View *, unsigned long, bool)),
-            this, SLOT(viewZoomLevelChanged(View *, unsigned long, bool)));
+    connect(m_viewManager, SIGNAL(playbackFrameChanged(int)),
+            this, SLOT(playbackFrameChanged(int)));
+
+    connect(m_viewManager, SIGNAL(globalCentreFrameChanged(int)),
+            this, SLOT(globalCentreFrameChanged(int)));
+
+    connect(m_viewManager, SIGNAL(viewCentreFrameChanged(View *, int)),
+            this, SLOT(viewCentreFrameChanged(View *, int)));
+
+    connect(m_viewManager, SIGNAL(viewZoomLevelChanged(View *, int, bool)),
+            this, SLOT(viewZoomLevelChanged(View *, int, bool)));
 
     connect(Preferences::getInstance(),
             SIGNAL(propertyChanged(PropertyContainer::PropertyName)),
@@ -281,7 +281,11 @@
 }
 
 void
-MainWindowBase::finaliseMenu(QMenu *menu)
+MainWindowBase::finaliseMenu(QMenu *
+#ifdef Q_OS_MAC
+                             menu
+#endif
+    )
 {
 #ifdef Q_OS_MAC
     // See https://bugreports.qt-project.org/browse/QTBUG-38256 and
@@ -744,7 +748,7 @@
     Pane *currentPane = m_paneStack->getCurrentPane();
     if (!currentPane) return;
 
-    size_t startFrame, endFrame;
+    int startFrame, endFrame;
 
     if (currentPane->getStartFrame() < 0) startFrame = 0;
     else startFrame = currentPane->getStartFrame();
@@ -815,17 +819,17 @@
 void
 MainWindowBase::pasteAtPlaybackPosition()
 {
-    unsigned long pos = getFrame();
+    int pos = getFrame();
     Clipboard &clipboard = m_viewManager->getClipboard();
     if (!clipboard.empty()) {
-        long firstEventFrame = clipboard.getPoints()[0].getFrame();
-        long offset = 0;
+        int firstEventFrame = clipboard.getPoints()[0].getFrame();
+        int offset = 0;
         if (firstEventFrame < 0) {
-            offset = (long)pos - firstEventFrame;
+            offset = pos - firstEventFrame;
         } else if (firstEventFrame < pos) {
-            offset = pos - (unsigned long)firstEventFrame;
+            offset = pos - firstEventFrame;
         } else {
-            offset = -((unsigned long)firstEventFrame - pos);
+            offset = -(firstEventFrame - pos);
         }
         pasteRelative(offset);
     }
@@ -898,7 +902,7 @@
 
 // FrameTimer method
 
-unsigned long
+int
 MainWindowBase::getFrame() const
 {
     if (m_playSource && m_playSource->isPlaying()) {
@@ -920,8 +924,8 @@
     MultiSelection::SelectionList selections = m_viewManager->getSelections();
     for (MultiSelection::SelectionList::iterator i = selections.begin();
          i != selections.end(); ++i) {
-        size_t start = i->getStartFrame();
-        size_t end = i->getEndFrame();
+        int start = i->getStartFrame();
+        int end = i->getEndFrame();
         if (start != end) {
             insertInstantAt(start);
             insertInstantAt(end);
@@ -930,7 +934,7 @@
 }
 
 void
-MainWindowBase::insertInstantAt(size_t frame)
+MainWindowBase::insertInstantAt(int frame)
 {
     Pane *pane = m_paneStack->getCurrentPane();
     if (!pane) {
@@ -1022,8 +1026,8 @@
     MultiSelection::SelectionList selections = m_viewManager->getSelections();
     for (MultiSelection::SelectionList::iterator i = selections.begin();
          i != selections.end(); ++i) {
-        size_t start = i->getStartFrame();
-        size_t end = i->getEndFrame();
+        int start = i->getStartFrame();
+        int end = i->getEndFrame();
         if (start < end) {
             insertItemAt(start, end - start);
         }
@@ -1031,7 +1035,7 @@
 }
 
 void
-MainWindowBase::insertItemAt(size_t frame, size_t duration)
+MainWindowBase::insertItemAt(int frame, int duration)
 {
     Pane *pane = m_paneStack->getCurrentPane();
     if (!pane) {
@@ -1040,10 +1044,10 @@
 
     // ugh!
 
-    size_t alignedStart = pane->alignFromReference(frame);
-    size_t alignedEnd = pane->alignFromReference(frame + duration);
+    int alignedStart = pane->alignFromReference(frame);
+    int alignedEnd = pane->alignFromReference(frame + duration);
     if (alignedStart >= alignedEnd) return;
-    size_t alignedDuration = alignedEnd - alignedStart;
+    int alignedDuration = alignedEnd - alignedStart;
 
     Command *c = 0;
 
@@ -1237,7 +1241,7 @@
 
     m_openingAudioFile = true;
 
-    size_t rate = 0;
+    int rate = 0;
 
     if (Preferences::getInstance()->getResampleOnLoad()) {
         rate = m_playSource->getSourceSampleRate();
@@ -1848,7 +1852,6 @@
 
     QXmlInputSource *inputSource = 0;
     QFile *file = 0;
-    bool isTemplate = false;
 
     file = new QFile(source.getLocalFilename());
     inputSource = new QXmlInputSource(file);
@@ -1938,7 +1941,7 @@
 MainWindowBase::FileOpenStatus
 MainWindowBase::openLayersFromRDF(FileSource source)
 {
-    size_t rate = 0;
+    int rate = 0;
 
     SVDEBUG << "MainWindowBase::openLayersFromRDF" << endl;
 
@@ -2316,17 +2319,17 @@
     Model *model = getMainModel();
     if (!model) return;
 
-    size_t start = model->getStartFrame();
-    size_t end = model->getEndFrame();
+    int start = model->getStartFrame();
+    int end = model->getEndFrame();
     if (m_playSource) end = std::max(end, m_playSource->getPlayEndFrame());
 
-    size_t pixels = currentPane->width();
-
-    size_t sw = currentPane->getVerticalScaleWidth();
+    int pixels = currentPane->width();
+
+    int sw = currentPane->getVerticalScaleWidth();
     if (pixels > sw * 2) pixels -= sw * 2; else pixels = 1;
     if (pixels > 4) pixels -= 4;
 
-    size_t zoomLevel = (end - start) / pixels;
+    int zoomLevel = (end - start) / pixels;
     if (zoomLevel < 1) zoomLevel = 1;
 
     currentPane->setZoomLevel(zoomLevel);
@@ -2544,7 +2547,7 @@
 
     Pane *pane = m_paneStack->getCurrentPane();
     Layer *layer = getSnapLayer();
-    size_t sr = getMainModel()->getSampleRate();
+    int sr = getMainModel()->getSampleRate();
 
     if (!layer) {
 
@@ -2556,7 +2559,7 @@
 
     } else {
 
-        size_t resolution = 0;
+        int resolution = 0;
         if (pane) frame = pane->alignFromReference(frame);
         if (layer->snapToFeatureFrame(m_paneStack->getCurrentPane(), frame,
                                       resolution, Layer::SnapRight)) {
@@ -2569,7 +2572,7 @@
     if (frame < 0) frame = 0;
 
     if (m_viewManager->getPlaySelectionMode()) {
-        frame = m_viewManager->constrainFrameToSelection(size_t(frame));
+        frame = m_viewManager->constrainFrameToSelection(int(frame));
     }
 
     m_viewManager->setPlaybackFrame(frame);
@@ -2593,7 +2596,7 @@
         stop();
     }
 
-    size_t frame = getMainModel()->getEndFrame();
+    int frame = getMainModel()->getEndFrame();
 
     if (m_viewManager->getPlaySelectionMode()) {
         frame = m_viewManager->constrainFrameToSelection(frame);
@@ -2611,11 +2614,10 @@
     if (!layer) { ffwd(); return; }
 
     Pane *pane = m_paneStack->getCurrentPane();
-    size_t sr = getMainModel()->getSampleRate();
 
     int frame = m_viewManager->getPlaybackFrame();
 
-    size_t resolution = 0;
+    int resolution = 0;
     if (pane) frame = pane->alignFromReference(frame);
     if (layer->snapToSimilarFeature(m_paneStack->getCurrentPane(), frame,
                                     resolution, Layer::SnapRight)) {
@@ -2627,7 +2629,7 @@
     if (frame < 0) frame = 0;
 
     if (m_viewManager->getPlaySelectionMode()) {
-        frame = m_viewManager->constrainFrameToSelection(size_t(frame));
+        frame = m_viewManager->constrainFrameToSelection(int(frame));
     }
 
     m_viewManager->setPlaybackFrame(frame);
@@ -2650,7 +2652,7 @@
 
     Pane *pane = m_paneStack->getCurrentPane();
     Layer *layer = getSnapLayer();
-    size_t sr = getMainModel()->getSampleRate();
+    int sr = getMainModel()->getSampleRate();
 
     // when rewinding during playback, we want to allow a period
    // following a rewind target point at which the rewind will go to
@@ -2672,7 +2674,7 @@
 
     } else {
 
-        size_t resolution = 0;
+        int resolution = 0;
         if (pane) frame = pane->alignFromReference(frame);
         if (layer->snapToFeatureFrame(m_paneStack->getCurrentPane(), frame,
                                       resolution, Layer::SnapLeft)) {
@@ -2685,7 +2687,7 @@
     if (frame < 0) frame = 0;
 
     if (m_viewManager->getPlaySelectionMode()) {
-        frame = m_viewManager->constrainFrameToSelection(size_t(frame));
+        frame = m_viewManager->constrainFrameToSelection(int(frame));
     }
 
     m_viewManager->setPlaybackFrame(frame);
@@ -2696,7 +2698,7 @@
 {
     if (!getMainModel()) return;
 
-    size_t frame = getMainModel()->getStartFrame();
+    int frame = getMainModel()->getStartFrame();
 
     if (m_viewManager->getPlaySelectionMode()) {
         frame = m_viewManager->constrainFrameToSelection(frame);
@@ -2714,11 +2716,10 @@
     if (!layer) { rewind(); return; }
 
     Pane *pane = m_paneStack->getCurrentPane();
-    size_t sr = getMainModel()->getSampleRate();
 
     int frame = m_viewManager->getPlaybackFrame();
 
-    size_t resolution = 0;
+    int resolution = 0;
     if (pane) frame = pane->alignFromReference(frame);
     if (layer->snapToSimilarFeature(m_paneStack->getCurrentPane(), frame,
                                     resolution, Layer::SnapLeft)) {
@@ -2730,7 +2731,7 @@
     if (frame < 0) frame = 0;
 
     if (m_viewManager->getPlaySelectionMode()) {
-        frame = m_viewManager->constrainFrameToSelection(size_t(frame));
+        frame = m_viewManager->constrainFrameToSelection(int(frame));
     }
 
     m_viewManager->setPlaybackFrame(frame);
@@ -2942,24 +2943,24 @@
 MainWindowBase::connectLayerEditDialog(ModelDataTableDialog *dialog)
 {
     connect(m_viewManager,
-            SIGNAL(globalCentreFrameChanged(unsigned long)),
+            SIGNAL(globalCentreFrameChanged(int)),
             dialog,
-            SLOT(userScrolledToFrame(unsigned long)));
+            SLOT(userScrolledToFrame(int)));
 
     connect(m_viewManager,
-            SIGNAL(playbackFrameChanged(unsigned long)),
+            SIGNAL(playbackFrameChanged(int)),
             dialog,
-            SLOT(playbackScrolledToFrame(unsigned long)));
+            SLOT(playbackScrolledToFrame(int)));
 
     connect(dialog,
-            SIGNAL(scrollToFrame(unsigned long)),
+            SIGNAL(scrollToFrame(int)),
             m_viewManager,
-            SLOT(setGlobalCentreFrame(unsigned long)));
+            SLOT(setGlobalCentreFrame(int)));
 
     connect(dialog,
-            SIGNAL(scrollToFrame(unsigned long)),
+            SIGNAL(scrollToFrame(int)),
             m_viewManager,
-            SLOT(setPlaybackFrame(unsigned long)));
+            SLOT(setPlaybackFrame(int)));
 }
 
 void
@@ -3047,7 +3048,7 @@
 }
 
 void
-MainWindowBase::playbackFrameChanged(unsigned long frame)
+MainWindowBase::playbackFrameChanged(int frame)
 {
     if (!(m_playSource && m_playSource->isPlaying()) || !getMainModel()) return;
 
@@ -3083,7 +3084,7 @@
 }
 
 void
-MainWindowBase::globalCentreFrameChanged(unsigned long )
+MainWindowBase::globalCentreFrameChanged(int )
 {
     if ((m_playSource && m_playSource->isPlaying()) || !getMainModel()) return;
     Pane *p = 0;
@@ -3093,7 +3094,7 @@
 }
 
 void
-MainWindowBase::viewCentreFrameChanged(View *v, unsigned long frame)
+MainWindowBase::viewCentreFrameChanged(View *v, int frame)
 {
     // SVDEBUG << "MainWindowBase::viewCentreFrameChanged(" << v << "," << frame << ")" << endl;
 
@@ -3110,7 +3111,7 @@
 }
 
 void
-MainWindowBase::viewZoomLevelChanged(View *v, unsigned long , bool )
+MainWindowBase::viewZoomLevelChanged(View *v, int , bool )
 {
     if ((m_playSource && m_playSource->isPlaying()) || !getMainModel()) return;
     Pane *p = 0;
@@ -3324,7 +3325,6 @@
     process->start(command, args);
 
 #else
-#ifdef Q_WS_X11
     if (!qgetenv("KDE_FULL_SESSION").isEmpty()) {
         args.append("exec");
         args.append(url);
@@ -3338,7 +3338,6 @@
     }
 
 #endif
 #endif
-#endif
 
 }
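The connect() rewrites above are forced by the parameter-type change: with the string-based SIGNAL()/SLOT() macros, Qt matches signatures by their normalised text at run time, so once the ViewManager signals take int instead of unsigned long, any connect() still naming the old types prints a "No such signal" warning and the slot simply never fires. A hedged sketch of that failure mode, using a made-up Emitter/Receiver pair rather than the real ViewManager and MainWindowBase classes:

    // sketch only: string-based connects must name the exact declared parameter types
    #include <QObject>

    class Emitter : public QObject
    {
        Q_OBJECT
    signals:
        void playbackFrameChanged(int frame);   // was (unsigned long) before the change
    };

    class Receiver : public QObject
    {
        Q_OBJECT
    public slots:
        void playbackFrameChanged(int) { /* ... */ }
    };

    void wire(Emitter *e, Receiver *r)
    {
        // fails at run time ("No such signal") because the signal is now (int):
        // QObject::connect(e, SIGNAL(playbackFrameChanged(unsigned long)),
        //                  r, SLOT(playbackFrameChanged(unsigned long)));

        // matches the declared signature, so the connection is made:
        QObject::connect(e, SIGNAL(playbackFrameChanged(int)),
                         r, SLOT(playbackFrameChanged(int)));
    }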
--- a/framework/MainWindowBase.h Tue Jun 03 11:03:09 2014 +0100
+++ b/framework/MainWindowBase.h Tue Jun 17 16:23:06 2014 +0100
@@ -110,7 +110,7 @@
     virtual bool saveSessionTemplate(QString path);
 
     /// Implementation of FrameTimer interface method
-    virtual unsigned long getFrame() const;
+    virtual int getFrame() const;
 
 signals:
     // Used to toggle the availability of menu actions
@@ -207,14 +207,14 @@
     virtual void playSelectionToggled();
     virtual void playSoloToggled();
 
-    virtual void sampleRateMismatch(size_t, size_t, bool) = 0;
+    virtual void sampleRateMismatch(int, int, bool) = 0;
     virtual void audioOverloadPluginDisabled() = 0;
     virtual void audioTimeStretchMultiChannelDisabled() = 0;
 
-    virtual void playbackFrameChanged(unsigned long);
-    virtual void globalCentreFrameChanged(unsigned long);
-    virtual void viewCentreFrameChanged(View *, unsigned long);
-    virtual void viewZoomLevelChanged(View *, unsigned long, bool);
+    virtual void playbackFrameChanged(int);
+    virtual void globalCentreFrameChanged(int);
+    virtual void viewCentreFrameChanged(View *, int);
+    virtual void viewZoomLevelChanged(View *, int, bool);
     virtual void outputLevelsChanged(float, float) = 0;
     virtual void currentPaneChanged(Pane *);
 
@@ -234,10 +234,10 @@
     virtual void deleteSelected();
 
     virtual void insertInstant();
-    virtual void insertInstantAt(size_t);
+    virtual void insertInstantAt(int);
     virtual void insertInstantsAtBoundaries();
     virtual void insertItemAtSelection();
-    virtual void insertItemAt(size_t, size_t);
+    virtual void insertItemAt(int, int);
     virtual void renumberInstants();
 
     virtual void documentModified();
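Because the MainWindowBase slots above are virtual and overridden in the application's MainWindow, the header change has to be mirrored in every subclass: a subclass that still declares playbackFrameChanged(unsigned long) would silently stop overriding and instead add an unrelated overload. A small illustration with hypothetical Base/Derived names, not the real classes; C++11's override would catch the mismatch at compile time, though code of this era may not use it:

    // sketch: changing a virtual slot's parameter type breaks old overrides silently
    struct Base {
        virtual void playbackFrameChanged(int) {}   // type changed from unsigned long
        virtual ~Base() {}
    };

    struct StaleDerived : Base {
        // compiles, but no longer overrides anything -- it is a separate overload
        virtual void playbackFrameChanged(unsigned long) {}
    };

    struct FixedDerived : Base {
        // matches the new base signature; "override" makes the intent checkable
        void playbackFrameChanged(int) override {}
    };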
--- a/framework/SVFileReader.cpp Tue Jun 03 11:03:09 2014 +0100
+++ b/framework/SVFileReader.cpp Tue Jun 17 16:23:06 2014 +0100
@@ -300,7 +300,7 @@
     } else if (name == "derivation") {
 
         if (!m_currentDerivedModel) {
-            if (m_currentDerivedModel < 0) {
+            if (m_currentDerivedModelId < 0) {
                 cerr << "WARNING: SV-XML: Bad derivation output model id "
                      << m_currentDerivedModelId << endl;
             } else if (haveModel(m_currentDerivedModelId)) {
@@ -473,7 +473,7 @@
 
     file.waitForData();
 
-    size_t rate = 0;
+    int rate = 0;
 
     if (!mainModel &&
         Preferences::getInstance()->getResampleOnLoad()) {
@@ -754,8 +754,8 @@
 
     // The view properties first
 
-    READ_MANDATORY(size_t, centre, toUInt);
-    READ_MANDATORY(size_t, zoom, toUInt);
+    READ_MANDATORY(int, centre, toInt);
+    READ_MANDATORY(int, zoom, toInt);
     READ_MANDATORY(int, followPan, toInt);
     READ_MANDATORY(int, followZoom, toInt);
     QString tracking = attributes.value("tracking");
@@ -1004,8 +1004,8 @@
         cerr << "Current dataset is a note model" << endl;
         float value = 0.0;
         value = attributes.value("value").trimmed().toFloat(&ok);
-        size_t duration = 0;
-        duration = attributes.value("duration").trimmed().toUInt(&ok);
+        int duration = 0;
+        duration = attributes.value("duration").trimmed().toInt(&ok);
         QString label = attributes.value("label");
         float level = attributes.value("level").trimmed().toFloat(&ok);
         if (!ok) { // level is optional
@@ -1022,8 +1022,8 @@
         cerr << "Current dataset is a flexinote model" << endl;
         float value = 0.0;
         value = attributes.value("value").trimmed().toFloat(&ok);
-        size_t duration = 0;
-        duration = attributes.value("duration").trimmed().toUInt(&ok);
+        int duration = 0;
+        duration = attributes.value("duration").trimmed().toInt(&ok);
         QString label = attributes.value("label");
         float level = attributes.value("level").trimmed().toFloat(&ok);
         if (!ok) { // level is optional
@@ -1040,8 +1040,8 @@
         cerr << "Current dataset is a region model" << endl;
         float value = 0.0;
         value = attributes.value("value").trimmed().toFloat(&ok);
-        size_t duration = 0;
-        duration = attributes.value("duration").trimmed().toUInt(&ok);
+        int duration = 0;
+        duration = attributes.value("duration").trimmed().toInt(&ok);
         QString label = attributes.value("label");
         rm->addPoint(RegionModel::Point(frame, value, duration, label));
         return ok;
@@ -1245,8 +1245,8 @@
 
     QString startFrameStr = attributes.value("startFrame");
     QString durationStr = attributes.value("duration");
-    size_t startFrame = 0;
-    size_t duration = 0;
+    int startFrame = 0;
+    int duration = 0;
     if (startFrameStr != "") {
         startFrame = startFrameStr.trimmed().toInt(&ok);
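The parser changes swap QString::toUInt() for QString::toInt() to match the new int-typed frame and duration variables. Both conversions behave the same way on bad input: they return 0 and set the optional bool *ok flag to false, which is what the surrounding ok checks rely on; values above INT_MAX now fail rather than wrap. A brief standalone sketch, not taken from SVFileReader:

    // sketch: QString::toInt() reports failure through the ok flag, returning 0
    #include <QString>
    #include <QtDebug>

    int main()
    {
        bool ok = false;

        int duration = QString("4096").trimmed().toInt(&ok);
        qDebug() << duration << ok;                     // 4096 true

        int bad = QString("not-a-number").toInt(&ok);
        qDebug() << bad << ok;                          // 0 false

        int tooBig = QString("3000000000").toInt(&ok);  // > INT_MAX
        qDebug() << tooBig << ok;                       // 0 false
        return 0;
    }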
--- a/framework/TransformUserConfigurator.cpp Tue Jun 03 11:03:09 2014 +0100
+++ b/framework/TransformUserConfigurator.cpp Tue Jun 17 16:23:06 2014 +0100
@@ -54,8 +54,8 @@
         Vamp::PluginBase *plugin,
         Model *&inputModel,
         AudioPlaySource *source,
-        size_t startFrame,
-        size_t duration,
+        int startFrame,
+        int duration,
         const QMap<QString, Model *> &modelMap,
         QStringList candidateModelNames,
         QString defaultModelName)
@@ -201,7 +201,7 @@
         }
     }
 
-    size_t stepSize = 0, blockSize = 0;
+    int stepSize = 0, blockSize = 0;
     WindowType windowType = HanningWindow;
 
     dialog->getProcessingParameters(stepSize,
--- a/framework/TransformUserConfigurator.h Tue Jun 03 11:03:09 2014 +0100
+++ b/framework/TransformUserConfigurator.h Tue Jun 17 16:23:06 2014 +0100
@@ -27,8 +27,8 @@
         Vamp::PluginBase *plugin,
         Model *&inputModel,
         AudioPlaySource *source,
-        size_t startFrame,
-        size_t duration,
+        int startFrame,
+        int duration,
         const QMap<QString, Model *> &modelMap,
         QStringList candidateModelNames,
         QString defaultModelName);
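The same size_t-to-int change applies to the startFrame/duration parameters declared here and defined in TransformUserConfigurator.cpp above; the declaration and the definition have to change together or they stop matching. One practical consequence of frame counts becoming signed 32-bit int is worth noting (rough arithmetic, an observation rather than a claim from the changeset): INT_MAX frames is about 2.1 billion samples, i.e. roughly 13.5 hours of audio at 44.1 kHz and about 6.2 hours at 96 kHz, beyond which an int frame index would overflow.

    // sketch: how long an int frame counter lasts at common sample rates
    #include <climits>
    #include <cstdio>

    int main()
    {
        const int rates[] = { 44100, 48000, 96000, 192000 };
        for (int rate : rates) {
            double hours = double(INT_MAX) / rate / 3600.0;
            std::printf("%6d Hz -> about %.1f hours before an int frame index overflows\n",
                        rate, hours);
        }
        return 0;
    }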