svapp: comparison of audio/AudioCallbackPlaySource.cpp @ 468:56acd9368532 (bqaudioio)

Initial work toward switching to bqaudioio library (so as to get I/O, not just O)

author:   Chris Cannam
date:     Tue, 04 Aug 2015 13:27:42 +0100
parents:  audioio/AudioCallbackPlaySource.cpp@ad998a2fe9e2
children: 0d725dd7f99c
comparing 466:45054b36ddbf with 468:56acd9368532
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ | |
2 | |
3 /* | |
4 Sonic Visualiser | |
5 An audio file viewer and annotation editor. | |
6 Centre for Digital Music, Queen Mary, University of London. | |
7 This file copyright 2006 Chris Cannam and QMUL. | |
8 | |
9 This program is free software; you can redistribute it and/or | |
10 modify it under the terms of the GNU General Public License as | |
11 published by the Free Software Foundation; either version 2 of the | |
12 License, or (at your option) any later version. See the file | |
13 COPYING included with this distribution for more information. | |
14 */ | |
15 | |
16 #include "AudioCallbackPlaySource.h" | |
17 | |
18 #include "AudioGenerator.h" | |
19 | |
20 #include "data/model/Model.h" | |
21 #include "base/ViewManagerBase.h" | |
22 #include "base/PlayParameterRepository.h" | |
23 #include "base/Preferences.h" | |
24 #include "data/model/DenseTimeValueModel.h" | |
25 #include "data/model/WaveFileModel.h" | |
26 #include "data/model/SparseOneDimensionalModel.h" | |
27 #include "plugin/RealTimePluginInstance.h" | |
28 | |
29 #include "bqaudioio/SystemPlaybackTarget.h" | |
30 | |
31 #include <rubberband/RubberBandStretcher.h> | |
32 using namespace RubberBand; | |
33 | |
34 #include <iostream> | |
35 #include <cassert> | |
36 | |
37 //#define DEBUG_AUDIO_PLAY_SOURCE 1 | |
38 //#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1 | |
39 | |
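// 131071 = 2^17 - 1 frames per channel: roughly three seconds of
// buffered audio at 44.1 kHz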
40 static const int DEFAULT_RING_BUFFER_SIZE = 131071; | |
41 | |
42 AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManagerBase *manager, | |
43 QString clientName) : | |
44 m_viewManager(manager), | |
45 m_audioGenerator(new AudioGenerator()), | |
46 m_clientName(clientName.toUtf8().data()), | |
47 m_readBuffers(0), | |
48 m_writeBuffers(0), | |
49 m_readBufferFill(0), | |
50 m_writeBufferFill(0), | |
51 m_bufferScavenger(1), | |
52 m_sourceChannelCount(0), | |
53 m_blockSize(1024), | |
54 m_sourceSampleRate(0), | |
55 m_targetSampleRate(0), | |
56 m_playLatency(0), | |
57 m_target(0), | |
58 m_lastRetrievalTimestamp(0.0), | |
59 m_lastRetrievedBlockSize(0), | |
60 m_trustworthyTimestamps(true), | |
61 m_lastCurrentFrame(0), | |
62 m_playing(false), | |
63 m_exiting(false), | |
64 m_lastModelEndFrame(0), | |
65 m_ringBufferSize(DEFAULT_RING_BUFFER_SIZE), | |
66 m_outputLeft(0.0), | |
67 m_outputRight(0.0), | |
68 m_auditioningPlugin(0), | |
69 m_auditioningPluginBypassed(false), | |
70 m_playStartFrame(0), | |
71 m_playStartFramePassed(false), | |
72 m_timeStretcher(0), | |
73 m_monoStretcher(0), | |
74 m_stretchRatio(1.0), | |
75 m_stretchMono(false), | |
76 m_stretcherInputCount(0), | |
77 m_stretcherInputs(0), | |
78 m_stretcherInputSizes(0), | |
79 m_fillThread(0), | |
80 m_converter(0), | |
81 m_crapConverter(0), | |
82 m_resampleQuality(Preferences::getInstance()->getResampleQuality()) | |
83 { | |
84 m_viewManager->setAudioPlaySource(this); | |
85 | |
86 connect(m_viewManager, SIGNAL(selectionChanged()), | |
87 this, SLOT(selectionChanged())); | |
88 connect(m_viewManager, SIGNAL(playLoopModeChanged()), | |
89 this, SLOT(playLoopModeChanged())); | |
90 connect(m_viewManager, SIGNAL(playSelectionModeChanged()), | |
91 this, SLOT(playSelectionModeChanged())); | |
92 | |
93 connect(this, SIGNAL(playStatusChanged(bool)), | |
94 m_viewManager, SLOT(playStatusChanged(bool))); | |
95 | |
96 connect(PlayParameterRepository::getInstance(), | |
97 SIGNAL(playParametersChanged(PlayParameters *)), | |
98 this, SLOT(playParametersChanged(PlayParameters *))); | |
99 | |
100 connect(Preferences::getInstance(), | |
101 SIGNAL(propertyChanged(PropertyContainer::PropertyName)), | |
102 this, SLOT(preferenceChanged(PropertyContainer::PropertyName))); | |
103 } | |
104 | |
105 AudioCallbackPlaySource::~AudioCallbackPlaySource() | |
106 { | |
107 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
108 SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource entering" << endl; | |
109 #endif | |
110 m_exiting = true; | |
111 | |
112 if (m_fillThread) { | |
113 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
114 cout << "AudioCallbackPlaySource dtor: awakening thread" << endl; | |
115 #endif | |
116 m_condition.wakeAll(); | |
117 m_fillThread->wait(); | |
118 delete m_fillThread; | |
119 } | |
120 | |
121 clearModels(); | |
122 | |
123 if (m_readBuffers != m_writeBuffers) { | |
124 delete m_readBuffers; | |
125 } | |
126 | |
127 delete m_writeBuffers; | |
128 | |
129 delete m_audioGenerator; | |
130 | |
131 for (int i = 0; i < m_stretcherInputCount; ++i) { | |
132 delete[] m_stretcherInputs[i]; | |
133 } | |
134 delete[] m_stretcherInputSizes; | |
135 delete[] m_stretcherInputs; | |
136 | |
137 delete m_timeStretcher; | |
138 delete m_monoStretcher; | |
139 | |
140 m_bufferScavenger.scavenge(true); | |
141 m_pluginScavenger.scavenge(true); | |
142 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
143 SVDEBUG << "AudioCallbackPlaySource::~AudioCallbackPlaySource finishing" << endl; | |
144 #endif | |
145 } | |
146 | |
147 void | |
148 AudioCallbackPlaySource::addModel(Model *model) | |
149 { | |
150 if (m_models.find(model) != m_models.end()) return; | |
151 | |
152 bool willPlay = m_audioGenerator->addModel(model); | |
153 | |
154 m_mutex.lock(); | |
155 | |
156 m_models.insert(model); | |
157 if (model->getEndFrame() > m_lastModelEndFrame) { | |
158 m_lastModelEndFrame = model->getEndFrame(); | |
159 } | |
160 | |
161 bool buffersChanged = false, srChanged = false; | |
162 | |
163 int modelChannels = 1; | |
164 DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model); | |
165 if (dtvm) modelChannels = dtvm->getChannelCount(); | |
166 if (modelChannels > m_sourceChannelCount) { | |
167 m_sourceChannelCount = modelChannels; | |
168 } | |
169 | |
170 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
171 cout << "AudioCallbackPlaySource: Adding model with " << modelChannels << " channels at rate " << model->getSampleRate() << endl; | |
172 #endif | |
173 | |
174 if (m_sourceSampleRate == 0) { | |
175 | |
176 m_sourceSampleRate = model->getSampleRate(); | |
177 srChanged = true; | |
178 | |
179 } else if (model->getSampleRate() != m_sourceSampleRate) { | |
180 | |
181 // If this is a dense time-value model and we have no other, we | |
182 // can just switch to this model's sample rate | |
183 | |
184 if (dtvm) { | |
185 | |
186 bool conflicting = false; | |
187 | |
188 for (std::set<Model *>::const_iterator i = m_models.begin(); | |
189 i != m_models.end(); ++i) { | |
190 // Only wave file models can be considered conflicting -- | |
191 // writable wave file models are derived and we shouldn't | |
192 // take their rates into account. Also, don't give any | |
193 // particular weight to a file that's already playing at | |
194 // the wrong rate anyway | |
195 WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i); | |
196 if (wfm && wfm != dtvm && | |
197 wfm->getSampleRate() != model->getSampleRate() && | |
198 wfm->getSampleRate() == m_sourceSampleRate) { | |
199 SVDEBUG << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << endl; | |
200 conflicting = true; | |
201 break; | |
202 } | |
203 } | |
204 | |
205 if (conflicting) { | |
206 | |
207 SVDEBUG << "AudioCallbackPlaySource::addModel: ERROR: " | |
208 << "New model sample rate does not match" << endl | |
209 << "existing model(s) (new " << model->getSampleRate() | |
210 << " vs " << m_sourceSampleRate | |
211 << "), playback will be wrong" | |
212 << endl; | |
213 | |
214 emit sampleRateMismatch(model->getSampleRate(), | |
215 m_sourceSampleRate, | |
216 false); | |
217 } else { | |
218 m_sourceSampleRate = model->getSampleRate(); | |
219 srChanged = true; | |
220 } | |
221 } | |
222 } | |
223 | |
224 if (!m_writeBuffers || (int)m_writeBuffers->size() < getTargetChannelCount()) { | |
225 clearRingBuffers(true, getTargetChannelCount()); | |
226 buffersChanged = true; | |
227 } else { | |
228 if (willPlay) clearRingBuffers(true); | |
229 } | |
230 | |
231 if (buffersChanged || srChanged) { | |
232 if (m_converter) { | |
233 src_delete(m_converter); | |
234 src_delete(m_crapConverter); | |
235 m_converter = 0; | |
236 m_crapConverter = 0; | |
237 } | |
238 } | |
239 | |
240 rebuildRangeLists(); | |
241 | |
242 m_mutex.unlock(); | |
243 | |
244 m_audioGenerator->setTargetChannelCount(getTargetChannelCount()); | |
245 | |
246 if (!m_fillThread) { | |
247 m_fillThread = new FillThread(*this); | |
248 m_fillThread->start(); | |
249 } | |
250 | |
251 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
252 cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << endl; | |
253 #endif | |
254 | |
255 if (buffersChanged || srChanged) { | |
256 emit modelReplaced(); | |
257 } | |
258 | |
259 connect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), | |
260 this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); | |
261 | |
262 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
263 cout << "AudioCallbackPlaySource::addModel: awakening thread" << endl; | |
264 #endif | |
265 | |
266 m_condition.wakeAll(); | |
267 } | |
268 | |
269 void | |
270 AudioCallbackPlaySource::modelChangedWithin(sv_frame_t | |
271 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
272 startFrame | |
273 #endif | |
274 , sv_frame_t endFrame) | |
275 { | |
276 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
277 SVDEBUG << "AudioCallbackPlaySource::modelChangedWithin(" << startFrame << "," << endFrame << ")" << endl; | |
278 #endif | |
279 if (endFrame > m_lastModelEndFrame) { | |
280 m_lastModelEndFrame = endFrame; | |
281 rebuildRangeLists(); | |
282 } | |
283 } | |
284 | |
285 void | |
286 AudioCallbackPlaySource::removeModel(Model *model) | |
287 { | |
288 m_mutex.lock(); | |
289 | |
290 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
291 cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << endl; | |
292 #endif | |
293 | |
294 disconnect(model, SIGNAL(modelChangedWithin(sv_frame_t, sv_frame_t)), | |
295 this, SLOT(modelChangedWithin(sv_frame_t, sv_frame_t))); | |
296 | |
297 m_models.erase(model); | |
298 | |
299 if (m_models.empty()) { | |
300 if (m_converter) { | |
301 src_delete(m_converter); | |
302 src_delete(m_crapConverter); | |
303 m_converter = 0; | |
304 m_crapConverter = 0; | |
305 } | |
306 m_sourceSampleRate = 0; | |
307 } | |
308 | |
309 sv_frame_t lastEnd = 0; | |
310 for (std::set<Model *>::const_iterator i = m_models.begin(); | |
311 i != m_models.end(); ++i) { | |
312 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
313 cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << endl; | |
314 #endif | |
315 if ((*i)->getEndFrame() > lastEnd) { | |
316 lastEnd = (*i)->getEndFrame(); | |
317 } | |
318 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
319 cout << "(done, lastEnd now " << lastEnd << ")" << endl; | |
320 #endif | |
321 } | |
322 m_lastModelEndFrame = lastEnd; | |
323 | |
324 m_audioGenerator->removeModel(model); | |
325 | |
326 m_mutex.unlock(); | |
327 | |
328 clearRingBuffers(); | |
329 } | |
330 | |
331 void | |
332 AudioCallbackPlaySource::clearModels() | |
333 { | |
334 m_mutex.lock(); | |
335 | |
336 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
337 cout << "AudioCallbackPlaySource::clearModels()" << endl; | |
338 #endif | |
339 | |
340 m_models.clear(); | |
341 | |
342 if (m_converter) { | |
343 src_delete(m_converter); | |
344 src_delete(m_crapConverter); | |
345 m_converter = 0; | |
346 m_crapConverter = 0; | |
347 } | |
348 | |
349 m_lastModelEndFrame = 0; | |
350 | |
351 m_sourceSampleRate = 0; | |
352 | |
353 m_mutex.unlock(); | |
354 | |
355 m_audioGenerator->clearModels(); | |
356 | |
357 clearRingBuffers(); | |
358 } | |
359 | |
360 void | |
361 AudioCallbackPlaySource::clearRingBuffers(bool haveLock, int count) | |
362 { | |
363 if (!haveLock) m_mutex.lock(); | |
364 | |
365 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
366 cerr << "clearRingBuffers" << endl; | |
367 #endif | |
368 | |
369 rebuildRangeLists(); | |
370 | |
371 if (count == 0) { | |
372 if (m_writeBuffers) count = int(m_writeBuffers->size()); | |
373 } | |
374 | |
375 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
376 cerr << "current playing frame = " << getCurrentPlayingFrame() << endl; | |
377 | |
378 cerr << "write buffer fill (before) = " << m_writeBufferFill << endl; | |
379 #endif | |
380 | |
381 m_writeBufferFill = getCurrentBufferedFrame(); | |
382 | |
383 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
384 cerr << "current buffered frame = " << m_writeBufferFill << endl; | |
385 #endif | |
386 | |
387 if (m_readBuffers != m_writeBuffers) { | |
388 delete m_writeBuffers; | |
389 } | |
390 | |
391 m_writeBuffers = new RingBufferVector; | |
392 | |
393 for (int i = 0; i < count; ++i) { | |
394 m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize)); | |
395 } | |
396 | |
397 m_audioGenerator->reset(); | |
398 | |
399 // cout << "AudioCallbackPlaySource::clearRingBuffers: Created " | |
400 // << count << " write buffers" << endl; | |
401 | |
402 if (!haveLock) { | |
403 m_mutex.unlock(); | |
404 } | |
405 } | |
406 | |
407 void | |
408 AudioCallbackPlaySource::play(sv_frame_t startFrame) | |
409 { | |
410 if (!m_sourceSampleRate) { | |
411 cerr << "AudioCallbackPlaySource::play: No source sample rate available, not playing" << endl; | |
412 return; | |
413 } | |
414 | |
415 if (m_viewManager->getPlaySelectionMode() && | |
416 !m_viewManager->getSelections().empty()) { | |
417 | |
418 SVDEBUG << "AudioCallbackPlaySource::play: constraining frame " << startFrame << " to selection = "; | |
419 | |
420 startFrame = m_viewManager->constrainFrameToSelection(startFrame); | |
421 | |
422 SVDEBUG << startFrame << endl; | |
423 | |
424 } else { | |
425 if (startFrame < 0) { | |
426 startFrame = 0; | |
427 } | |
428 if (startFrame >= m_lastModelEndFrame) { | |
429 startFrame = 0; | |
430 } | |
431 } | |
432 | |
433 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
434 cerr << "play(" << startFrame << ") -> playback model "; | |
435 #endif | |
436 | |
437 startFrame = m_viewManager->alignReferenceToPlaybackFrame(startFrame); | |
438 | |
439 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
440 cerr << startFrame << endl; | |
441 #endif | |
442 | |
443 // The fill thread will automatically empty its buffers before | |
444 // starting again if we have not so far been playing, but not if | |
445 // we're just re-seeking. | |
446 // NO -- we can end up playing some first -- always reset here | |
447 | |
448 m_mutex.lock(); | |
449 | |
450 if (m_timeStretcher) { | |
451 m_timeStretcher->reset(); | |
452 } | |
453 if (m_monoStretcher) { | |
454 m_monoStretcher->reset(); | |
455 } | |
456 | |
457 m_readBufferFill = m_writeBufferFill = startFrame; | |
458 if (m_readBuffers) { | |
459 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
460 RingBuffer<float> *rb = getReadRingBuffer(c); | |
461 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
462 cerr << "reset ring buffer for channel " << c << endl; | |
463 #endif | |
464 if (rb) rb->reset(); | |
465 } | |
466 } | |
467 if (m_converter) src_reset(m_converter); | |
468 if (m_crapConverter) src_reset(m_crapConverter); | |
469 | |
470 m_mutex.unlock(); | |
471 | |
472 m_audioGenerator->reset(); | |
473 | |
474 m_playStartFrame = startFrame; | |
475 m_playStartFramePassed = false; | |
476 m_playStartedAt = RealTime::zeroTime; | |
477 if (m_target) { | |
478 m_playStartedAt = RealTime::fromSeconds(m_target->getCurrentTime()); | |
479 } | |
480 | |
481 bool changed = !m_playing; | |
482 m_lastRetrievalTimestamp = 0; | |
483 m_lastCurrentFrame = 0; | |
484 m_playing = true; | |
485 | |
486 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
487 cout << "AudioCallbackPlaySource::play: awakening thread" << endl; | |
488 #endif | |
489 | |
490 m_condition.wakeAll(); | |
491 if (changed) { | |
492 emit playStatusChanged(m_playing); | |
493 emit activity(tr("Play from %1").arg | |
494 (RealTime::frame2RealTime | |
495 (m_playStartFrame, m_sourceSampleRate).toText().c_str())); | |
496 } | |
497 } | |
498 | |
499 void | |
500 AudioCallbackPlaySource::stop() | |
501 { | |
502 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
503 SVDEBUG << "AudioCallbackPlaySource::stop()" << endl; | |
504 #endif | |
505 bool changed = m_playing; | |
506 m_playing = false; | |
507 | |
508 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
509 cout << "AudioCallbackPlaySource::stop: awakening thread" << endl; | |
510 #endif | |
511 | |
512 m_condition.wakeAll(); | |
513 m_lastRetrievalTimestamp = 0; | |
514 if (changed) { | |
515 emit playStatusChanged(m_playing); | |
516 emit activity(tr("Stop at %1").arg | |
517 (RealTime::frame2RealTime | |
518 (m_lastCurrentFrame, m_sourceSampleRate).toText().c_str())); | |
519 } | |
520 m_lastCurrentFrame = 0; | |
521 } | |
522 | |
523 void | |
524 AudioCallbackPlaySource::selectionChanged() | |
525 { | |
526 if (m_viewManager->getPlaySelectionMode()) { | |
527 clearRingBuffers(); | |
528 } | |
529 } | |
530 | |
531 void | |
532 AudioCallbackPlaySource::playLoopModeChanged() | |
533 { | |
534 clearRingBuffers(); | |
535 } | |
536 | |
537 void | |
538 AudioCallbackPlaySource::playSelectionModeChanged() | |
539 { | |
540 if (!m_viewManager->getSelections().empty()) { | |
541 clearRingBuffers(); | |
542 } | |
543 } | |
544 | |
545 void | |
546 AudioCallbackPlaySource::playParametersChanged(PlayParameters *) | |
547 { | |
548 clearRingBuffers(); | |
549 } | |
550 | |
551 void | |
552 AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n) | |
553 { | |
554 if (n == "Resample Quality") { | |
555 setResampleQuality(Preferences::getInstance()->getResampleQuality()); | |
556 } | |
557 } | |
558 | |
559 void | |
560 AudioCallbackPlaySource::audioProcessingOverload() | |
561 { | |
562 cerr << "Audio processing overload!" << endl; | |
563 | |
564 if (!m_playing) return; | |
565 | |
566 RealTimePluginInstance *ap = m_auditioningPlugin; | |
567 if (ap && !m_auditioningPluginBypassed) { | |
568 m_auditioningPluginBypassed = true; | |
569 emit audioOverloadPluginDisabled(); | |
570 return; | |
571 } | |
572 | |
573 if (m_timeStretcher && | |
574 m_timeStretcher->getTimeRatio() < 1.0 && | |
575 m_stretcherInputCount > 1 && | |
576 m_monoStretcher && !m_stretchMono) { | |
577 m_stretchMono = true; | |
578 emit audioTimeStretchMultiChannelDisabled(); | |
579 return; | |
580 } | |
581 } | |
582 | |
583 void | |
584 AudioCallbackPlaySource::setSystemPlaybackTarget(breakfastquay::SystemPlaybackTarget *target) | |
585 { | |
586 m_target = target; | |
587 } | |
588 | |
589 void | |
590 AudioCallbackPlaySource::setSystemPlaybackBlockSize(int size) | |
591 { | |
592 cout << "AudioCallbackPlaySource::setSystemPlaybackBlockSize: Block size -> " << size << endl; | |
593 if (size != 0) { | |
594 m_blockSize = size; | |
595 } | |
596 if (size * 4 > m_ringBufferSize) { | |
597 SVDEBUG << "AudioCallbackPlaySource::setSystemPlaybackBlockSize: Buffer size " | |
598 << size << " > a quarter of ring buffer size " | |
599 << m_ringBufferSize << ", calling for more ring buffer" | |
600 << endl; | |
601 m_ringBufferSize = size * 4; | |
602 if (m_writeBuffers && !m_writeBuffers->empty()) { | |
603 clearRingBuffers(); | |
604 } | |
605 } | |
606 } | |
607 | |
608 int | |
609 AudioCallbackPlaySource::getTargetBlockSize() const | |
610 { | |
611 // cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << endl; | |
612 return int(m_blockSize); | |
613 } | |
614 | |
615 void | |
616 AudioCallbackPlaySource::setSystemPlaybackLatency(int latency) | |
617 { | |
618 m_playLatency = latency; | |
619 } | |
620 | |
621 sv_frame_t | |
622 AudioCallbackPlaySource::getTargetPlayLatency() const | |
623 { | |
624 return m_playLatency; | |
625 } | |
626 | |
627 sv_frame_t | |
628 AudioCallbackPlaySource::getCurrentPlayingFrame() | |
629 { | |
630 // This method attempts to estimate which audio sample frame is | |
631 // "currently coming through the speakers". | |
632 | |
633 sv_samplerate_t targetRate = getTargetSampleRate(); | |
634 sv_frame_t latency = m_playLatency; // at target rate | |
635 RealTime latency_t = RealTime::zeroTime; | |
636 | |
637 if (targetRate != 0) { | |
638 latency_t = RealTime::frame2RealTime(latency, targetRate); | |
639 } | |
640 | |
641 return getCurrentFrame(latency_t); | |
642 } | |
643 | |
644 sv_frame_t | |
645 AudioCallbackPlaySource::getCurrentBufferedFrame() | |
646 { | |
647 return getCurrentFrame(RealTime::zeroTime); | |
648 } | |
649 | |
650 sv_frame_t | |
651 AudioCallbackPlaySource::getCurrentFrame(RealTime latency_t) | |
652 { | |
653 // We resample when filling the ring buffer, and time-stretch when | |
654 // draining it. The buffer contains data at the "target rate" and | |
655 // the latency provided by the target is also at the target rate. | |
656 // Because of the multiple rates involved, we do the actual | |
657 // calculation using RealTime instead. | |
658 | |
659 sv_samplerate_t sourceRate = getSourceSampleRate(); | |
660 sv_samplerate_t targetRate = getTargetSampleRate(); | |
661 | |
662 if (sourceRate == 0 || targetRate == 0) return 0; | |
663 | |
664 int inbuffer = 0; // at target rate | |
665 | |
666 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
667 RingBuffer<float> *rb = getReadRingBuffer(c); | |
668 if (rb) { | |
669 int here = rb->getReadSpace(); | |
670 if (c == 0 || here < inbuffer) inbuffer = here; | |
671 } | |
672 } | |
673 | |
674 sv_frame_t readBufferFill = m_readBufferFill; | |
675 sv_frame_t lastRetrievedBlockSize = m_lastRetrievedBlockSize; | |
676 double lastRetrievalTimestamp = m_lastRetrievalTimestamp; | |
677 double currentTime = 0.0; | |
678 if (m_target) currentTime = m_target->getCurrentTime(); | |
679 | |
680 bool looping = m_viewManager->getPlayLoopMode(); | |
681 | |
682 RealTime inbuffer_t = RealTime::frame2RealTime(inbuffer, targetRate); | |
683 | |
684 sv_frame_t stretchlat = 0; | |
685 double timeRatio = 1.0; | |
686 | |
687 if (m_timeStretcher) { | |
688 stretchlat = m_timeStretcher->getLatency(); | |
689 timeRatio = m_timeStretcher->getTimeRatio(); | |
690 } | |
691 | |
692 RealTime stretchlat_t = RealTime::frame2RealTime(stretchlat, targetRate); | |
693 | |
694 // When the target has just requested a block from us, the last | |
695 // sample it obtained was our buffer fill frame count minus the | |
696 // amount of read space (converted back to source sample rate) | |
697 // remaining now. That sample is not expected to be played until | |
698 // the target's play latency has elapsed. By the time the | |
699 // following block is requested, that sample will be at the | |
700 // target's play latency minus the last requested block size away | |
701 // from being played. | |
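    // As an illustration with hypothetical figures: if the buffer has
    // been filled to frame 100000, 2048 frames remain unread in it,
    // the device reports 4096 frames of latency, the last requested
    // block was 1024 frames, and 512 frames' worth of time has elapsed
    // since that request, then (ignoring stretcher latency) the frame
    // now audible is roughly 100000 - 2048 - 4096 - 1024 + 512 = 93344.
    // The calculation below does just this, but via RealTime so that
    // differing source/target rates and the time-stretch ratio can be
    // accounted for.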
702 | |
703 RealTime sincerequest_t = RealTime::zeroTime; | |
704 RealTime lastretrieved_t = RealTime::zeroTime; | |
705 | |
706 if (m_target && | |
707 m_trustworthyTimestamps && | |
708 lastRetrievalTimestamp != 0.0) { | |
709 | |
710 lastretrieved_t = RealTime::frame2RealTime | |
711 (lastRetrievedBlockSize, targetRate); | |
712 | |
713 // calculate number of frames at target rate that have elapsed | |
714 // since the end of the last call to getSourceSamples | |
715 | |
716 if (m_trustworthyTimestamps && !looping) { | |
717 | |
718 // this adjustment seems to cause more problems when looping | |
719 double elapsed = currentTime - lastRetrievalTimestamp; | |
720 | |
721 if (elapsed > 0.0) { | |
722 sincerequest_t = RealTime::fromSeconds(elapsed); | |
723 } | |
724 } | |
725 | |
726 } else { | |
727 | |
728 lastretrieved_t = RealTime::frame2RealTime | |
729 (getTargetBlockSize(), targetRate); | |
730 } | |
731 | |
732 RealTime bufferedto_t = RealTime::frame2RealTime(readBufferFill, sourceRate); | |
733 | |
734 if (timeRatio != 1.0) { | |
735 lastretrieved_t = lastretrieved_t / timeRatio; | |
736 sincerequest_t = sincerequest_t / timeRatio; | |
737 latency_t = latency_t / timeRatio; | |
738 } | |
739 | |
740 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
741 cerr << "\nbuffered to: " << bufferedto_t << ", in buffer: " << inbuffer_t << ", time ratio " << timeRatio << "\n stretcher latency: " << stretchlat_t << ", device latency: " << latency_t << "\n since request: " << sincerequest_t << ", last retrieved quantity: " << lastretrieved_t << endl; | |
742 #endif | |
743 | |
744 // Normally the range lists should contain at least one item each | |
745 // -- if playback is unconstrained, that item should report the | |
746 // entire source audio duration. | |
747 | |
748 if (m_rangeStarts.empty()) { | |
749 rebuildRangeLists(); | |
750 } | |
751 | |
752 if (m_rangeStarts.empty()) { | |
753 // this code is only used in case of error in rebuildRangeLists | |
754 RealTime playing_t = bufferedto_t | |
755 - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t | |
756 + sincerequest_t; | |
757 if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; | |
758 sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate); | |
759 return m_viewManager->alignPlaybackFrameToReference(frame); | |
760 } | |
761 | |
762 int inRange = 0; | |
763 int index = 0; | |
764 | |
765 for (int i = 0; i < (int)m_rangeStarts.size(); ++i) { | |
766 if (bufferedto_t >= m_rangeStarts[i]) { | |
767 inRange = index; | |
768 } else { | |
769 break; | |
770 } | |
771 ++index; | |
772 } | |
773 | |
774 if (inRange >= int(m_rangeStarts.size())) { | |
775 inRange = int(m_rangeStarts.size())-1; | |
776 } | |
777 | |
778 RealTime playing_t = bufferedto_t; | |
779 | |
780 playing_t = playing_t | |
781 - latency_t - stretchlat_t - lastretrieved_t - inbuffer_t | |
782 + sincerequest_t; | |
783 | |
784 // This rather gross little hack is used to ensure that latency | |
785 // compensation doesn't result in the playback pointer appearing | |
786 // to start earlier than the actual playback does. It doesn't | |
787 // work properly (hence the bail-out in the middle) because if we | |
788 // are playing a relatively short looped region, the playing time | |
789 // estimated from the buffer fill frame may have wrapped around | |
790 // the region boundary and end up being much smaller than the | |
791 // theoretical play start frame, perhaps even for the entire | |
792 // duration of playback! | |
793 | |
794 if (!m_playStartFramePassed) { | |
795 RealTime playstart_t = RealTime::frame2RealTime(m_playStartFrame, | |
796 sourceRate); | |
797 if (playing_t < playstart_t) { | |
798 // cerr << "playing_t " << playing_t << " < playstart_t " | |
799 // << playstart_t << endl; | |
800 if (/*!!! sincerequest_t > RealTime::zeroTime && */ | |
801 m_playStartedAt + latency_t + stretchlat_t < | |
802 RealTime::fromSeconds(currentTime)) { | |
803 // cerr << "but we've been playing for long enough that I think we should disregard it (it probably results from loop wrapping)" << endl; | |
804 m_playStartFramePassed = true; | |
805 } else { | |
806 playing_t = playstart_t; | |
807 } | |
808 } else { | |
809 m_playStartFramePassed = true; | |
810 } | |
811 } | |
812 | |
813 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
814 cerr << "playing_t " << playing_t; | |
815 #endif | |
816 | |
817 playing_t = playing_t - m_rangeStarts[inRange]; | |
818 | |
819 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
820 cerr << " as offset into range " << inRange << " (start =" << m_rangeStarts[inRange] << " duration =" << m_rangeDurations[inRange] << ") = " << playing_t << endl; | |
821 #endif | |
822 | |
823 while (playing_t < RealTime::zeroTime) { | |
824 | |
825 if (inRange == 0) { | |
826 if (looping) { | |
827 inRange = int(m_rangeStarts.size()) - 1; | |
828 } else { | |
829 break; | |
830 } | |
831 } else { | |
832 --inRange; | |
833 } | |
834 | |
835 playing_t = playing_t + m_rangeDurations[inRange]; | |
836 } | |
837 | |
838 playing_t = playing_t + m_rangeStarts[inRange]; | |
839 | |
840 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
841 cerr << " playing time: " << playing_t << endl; | |
842 #endif | |
843 | |
844 if (!looping) { | |
845 if (inRange == (int)m_rangeStarts.size()-1 && | |
846 playing_t >= m_rangeStarts[inRange] + m_rangeDurations[inRange]) { | |
847 cerr << "Not looping, inRange " << inRange << " == rangeStarts.size()-1, playing_t " << playing_t << " >= m_rangeStarts[inRange] " << m_rangeStarts[inRange] << " + m_rangeDurations[inRange] " << m_rangeDurations[inRange] << " -- stopping" << endl; | |
848 stop(); | |
849 } | |
850 } | |
851 | |
852 if (playing_t < RealTime::zeroTime) playing_t = RealTime::zeroTime; | |
853 | |
854 sv_frame_t frame = RealTime::realTime2Frame(playing_t, sourceRate); | |
855 | |
856 if (m_lastCurrentFrame > 0 && !looping) { | |
857 if (frame < m_lastCurrentFrame) { | |
858 frame = m_lastCurrentFrame; | |
859 } | |
860 } | |
861 | |
862 m_lastCurrentFrame = frame; | |
863 | |
864 return m_viewManager->alignPlaybackFrameToReference(frame); | |
865 } | |
866 | |
867 void | |
868 AudioCallbackPlaySource::rebuildRangeLists() | |
869 { | |
870 bool constrained = (m_viewManager->getPlaySelectionMode()); | |
871 | |
872 m_rangeStarts.clear(); | |
873 m_rangeDurations.clear(); | |
874 | |
875 sv_samplerate_t sourceRate = getSourceSampleRate(); | |
876 if (sourceRate == 0) return; | |
877 | |
878 RealTime end = RealTime::frame2RealTime(m_lastModelEndFrame, sourceRate); | |
879 if (end == RealTime::zeroTime) return; | |
880 | |
881 if (!constrained) { | |
882 m_rangeStarts.push_back(RealTime::zeroTime); | |
883 m_rangeDurations.push_back(end); | |
884 return; | |
885 } | |
886 | |
887 MultiSelection::SelectionList selections = m_viewManager->getSelections(); | |
888 MultiSelection::SelectionList::const_iterator i; | |
889 | |
890 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
891 SVDEBUG << "AudioCallbackPlaySource::rebuildRangeLists" << endl; | |
892 #endif | |
893 | |
894 if (!selections.empty()) { | |
895 | |
896 for (i = selections.begin(); i != selections.end(); ++i) { | |
897 | |
898 RealTime start = | |
899 (RealTime::frame2RealTime | |
900 (m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), | |
901 sourceRate)); | |
902 RealTime duration = | |
903 (RealTime::frame2RealTime | |
904 (m_viewManager->alignReferenceToPlaybackFrame(i->getEndFrame()) - | |
905 m_viewManager->alignReferenceToPlaybackFrame(i->getStartFrame()), | |
906 sourceRate)); | |
907 | |
908 m_rangeStarts.push_back(start); | |
909 m_rangeDurations.push_back(duration); | |
910 } | |
911 } else { | |
912 m_rangeStarts.push_back(RealTime::zeroTime); | |
913 m_rangeDurations.push_back(end); | |
914 } | |
915 | |
916 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
917 cerr << "Now have " << m_rangeStarts.size() << " play ranges" << endl; | |
918 #endif | |
919 } | |
920 | |
921 void | |
922 AudioCallbackPlaySource::setOutputLevels(float left, float right) | |
923 { | |
924 m_outputLeft = left; | |
925 m_outputRight = right; | |
926 } | |
927 | |
928 bool | |
929 AudioCallbackPlaySource::getOutputLevels(float &left, float &right) | |
930 { | |
931 left = m_outputLeft; | |
932 right = m_outputRight; | |
933 return true; | |
934 } | |
935 | |
936 void | |
937 AudioCallbackPlaySource::setSystemPlaybackSampleRate(int sr) | |
938 { | |
939 bool first = (m_targetSampleRate == 0); | |
940 | |
941 m_targetSampleRate = sr; | |
942 initialiseConverter(); | |
943 | |
944 if (first && (m_stretchRatio != 1.f)) { | |
945 // couldn't create a stretcher before because we had no sample | |
946 // rate: make one now | |
947 setTimeStretch(m_stretchRatio); | |
948 } | |
949 } | |
950 | |
951 void | |
952 AudioCallbackPlaySource::initialiseConverter() | |
953 { | |
954 m_mutex.lock(); | |
955 | |
956 if (m_converter) { | |
957 src_delete(m_converter); | |
958 src_delete(m_crapConverter); | |
959 m_converter = 0; | |
960 m_crapConverter = 0; | |
961 } | |
962 | |
963 if (getSourceSampleRate() != getTargetSampleRate()) { | |
964 | |
965 int err = 0; | |
966 | |
967 m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY : | |
968 m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY : | |
969 m_resampleQuality == 0 ? SRC_SINC_FASTEST : | |
970 SRC_SINC_MEDIUM_QUALITY, | |
971 getTargetChannelCount(), &err); | |
972 | |
973 if (m_converter) { | |
974 m_crapConverter = src_new(SRC_LINEAR, | |
975 getTargetChannelCount(), | |
976 &err); | |
977 } | |
978 | |
979 if (!m_converter || !m_crapConverter) { | |
980 cerr | |
981 << "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: " | |
982 << src_strerror(err) << endl; | |
983 | |
984 if (m_converter) { | |
985 src_delete(m_converter); | |
986 m_converter = 0; | |
987 } | |
988 | |
989 if (m_crapConverter) { | |
990 src_delete(m_crapConverter); | |
991 m_crapConverter = 0; | |
992 } | |
993 | |
994 m_mutex.unlock(); | |
995 | |
996 emit sampleRateMismatch(getSourceSampleRate(), | |
997 getTargetSampleRate(), | |
998 false); | |
999 } else { | |
1000 | |
1001 m_mutex.unlock(); | |
1002 | |
1003 emit sampleRateMismatch(getSourceSampleRate(), | |
1004 getTargetSampleRate(), | |
1005 true); | |
1006 } | |
1007 } else { | |
1008 m_mutex.unlock(); | |
1009 } | |
1010 } | |
1011 | |
1012 void | |
1013 AudioCallbackPlaySource::setResampleQuality(int q) | |
1014 { | |
1015 if (q == m_resampleQuality) return; | |
1016 m_resampleQuality = q; | |
1017 | |
1018 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1019 SVDEBUG << "AudioCallbackPlaySource::setResampleQuality: setting to " | |
1020 << m_resampleQuality << endl; | |
1021 #endif | |
1022 | |
1023 initialiseConverter(); | |
1024 } | |
1025 | |
1026 void | |
1027 AudioCallbackPlaySource::setAuditioningEffect(Auditionable *a) | |
1028 { | |
1029 RealTimePluginInstance *plugin = dynamic_cast<RealTimePluginInstance *>(a); | |
1030 if (a && !plugin) { | |
1031 cerr << "WARNING: AudioCallbackPlaySource::setAuditioningEffect: auditionable object " << a << " is not a real-time plugin instance" << endl; | |
1032 } | |
1033 | |
1034 m_mutex.lock(); | |
1035 m_auditioningPlugin = plugin; | |
1036 m_auditioningPluginBypassed = false; | |
1037 m_mutex.unlock(); | |
1038 } | |
1039 | |
1040 void | |
1041 AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s) | |
1042 { | |
1043 m_audioGenerator->setSoloModelSet(s); | |
1044 clearRingBuffers(); | |
1045 } | |
1046 | |
1047 void | |
1048 AudioCallbackPlaySource::clearSoloModelSet() | |
1049 { | |
1050 m_audioGenerator->clearSoloModelSet(); | |
1051 clearRingBuffers(); | |
1052 } | |
1053 | |
1054 sv_samplerate_t | |
1055 AudioCallbackPlaySource::getTargetSampleRate() const | |
1056 { | |
1057 if (m_targetSampleRate) return m_targetSampleRate; | |
1058 else return getSourceSampleRate(); | |
1059 } | |
1060 | |
1061 int | |
1062 AudioCallbackPlaySource::getSourceChannelCount() const | |
1063 { | |
1064 return m_sourceChannelCount; | |
1065 } | |
1066 | |
1067 int | |
1068 AudioCallbackPlaySource::getTargetChannelCount() const | |
1069 { | |
1070 if (m_sourceChannelCount < 2) return 2; | |
1071 return m_sourceChannelCount; | |
1072 } | |
1073 | |
1074 sv_samplerate_t | |
1075 AudioCallbackPlaySource::getSourceSampleRate() const | |
1076 { | |
1077 return m_sourceSampleRate; | |
1078 } | |
1079 | |
1080 void | |
1081 AudioCallbackPlaySource::setTimeStretch(double factor) | |
1082 { | |
1083 m_stretchRatio = factor; | |
1084 | |
1085 if (!getTargetSampleRate()) return; // have to make our stretcher later | |
1086 | |
1087 if (m_timeStretcher || (factor == 1.0)) { | |
1088 // stretch ratio will be set in next process call if appropriate | |
1089 } else { | |
1090 m_stretcherInputCount = getTargetChannelCount(); | |
1091 RubberBandStretcher *stretcher = new RubberBandStretcher | |
1092 (int(getTargetSampleRate()), | |
1093 m_stretcherInputCount, | |
1094 RubberBandStretcher::OptionProcessRealTime, | |
1095 factor); | |
1096 RubberBandStretcher *monoStretcher = new RubberBandStretcher | |
1097 (int(getTargetSampleRate()), | |
1098 1, | |
1099 RubberBandStretcher::OptionProcessRealTime, | |
1100 factor); | |
1101 m_stretcherInputs = new float *[m_stretcherInputCount]; | |
1102 m_stretcherInputSizes = new sv_frame_t[m_stretcherInputCount]; | |
1103 for (int c = 0; c < m_stretcherInputCount; ++c) { | |
1104 m_stretcherInputSizes[c] = 16384; | |
1105 m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; | |
1106 } | |
1107 m_monoStretcher = monoStretcher; | |
1108 m_timeStretcher = stretcher; | |
1109 } | |
1110 | |
1111 emit activity(tr("Change time-stretch factor to %1").arg(factor)); | |
1112 } | |
1113 | |
1114 void | |
1115 AudioCallbackPlaySource::getSourceSamples(int count, float **buffer) | |
1116 { | |
1117 if (!m_playing) { | |
1118 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1119 SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Not playing" << endl; | |
1120 #endif | |
1121 for (int ch = 0; ch < getTargetChannelCount(); ++ch) { | |
1122 for (int i = 0; i < count; ++i) { | |
1123 buffer[ch][i] = 0.0; | |
1124 } | |
1125 } | |
1126 return; | |
1127 } | |
1128 | |
1129 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1130 SVDEBUG << "AudioCallbackPlaySource::getSourceSamples: Playing" << endl; | |
1131 #endif | |
1132 | |
1133 // Ensure that all buffers have at least the amount of data we | |
1134 // need -- else reduce the size of our requests correspondingly | |
1135 | |
1136 for (int ch = 0; ch < getTargetChannelCount(); ++ch) { | |
1137 | |
1138 RingBuffer<float> *rb = getReadRingBuffer(ch); | |
1139 | |
1140 if (!rb) { | |
1141 cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " | |
1142 << "No ring buffer available for channel " << ch | |
1143 << ", returning no data here" << endl; | |
1144 count = 0; | |
1145 break; | |
1146 } | |
1147 | |
1148 int rs = rb->getReadSpace(); | |
1149 if (rs < count) { | |
1150 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1151 cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: " | |
1152 << "Ring buffer for channel " << ch << " has only " | |
1153 << rs << " (of " << count << ") samples available (" | |
1154 << "ring buffer size is " << rb->getSize() << ", write " | |
1155 << "space " << rb->getWriteSpace() << "), " | |
1156 << "reducing request size" << endl; | |
1157 #endif | |
1158 count = rs; | |
1159 } | |
1160 } | |
1161 | |
1162 if (count == 0) return; | |
1163 | |
1164 RubberBandStretcher *ts = m_timeStretcher; | |
1165 RubberBandStretcher *ms = m_monoStretcher; | |
1166 | |
1167 double ratio = ts ? ts->getTimeRatio() : 1.0; | |
1168 | |
1169 if (ratio != m_stretchRatio) { | |
1170 if (!ts) { | |
1171 cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: Time ratio change to " << m_stretchRatio << " is pending, but no stretcher is set" << endl; | |
1172 m_stretchRatio = 1.0; | |
1173 } else { | |
1174 ts->setTimeRatio(m_stretchRatio); | |
1175 if (ms) ms->setTimeRatio(m_stretchRatio); | |
1176 if (m_stretchRatio >= 1.0) m_stretchMono = false; | |
1177 } | |
1178 } | |
1179 | |
1180 int stretchChannels = m_stretcherInputCount; | |
1181 if (m_stretchMono) { | |
1182 if (ms) { | |
1183 ts = ms; | |
1184 stretchChannels = 1; | |
1185 } else { | |
1186 m_stretchMono = false; | |
1187 } | |
1188 } | |
1189 | |
1190 if (m_target) { | |
1191 m_lastRetrievedBlockSize = count; | |
1192 m_lastRetrievalTimestamp = m_target->getCurrentTime(); | |
1193 } | |
1194 | |
1195 if (!ts || ratio == 1.f) { | |
1196 | |
1197 int got = 0; | |
1198 | |
1199 for (int ch = 0; ch < getTargetChannelCount(); ++ch) { | |
1200 | |
1201 RingBuffer<float> *rb = getReadRingBuffer(ch); | |
1202 | |
1203 if (rb) { | |
1204 | |
1205 // this is marginally more likely to leave our channels in | |
1206 // sync after a processing failure than just passing "count": | |
1207 sv_frame_t request = count; | |
1208 if (ch > 0) request = got; | |
1209 | |
1210 got = rb->read(buffer[ch], int(request)); | |
1211 | |
1212 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1213 cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << endl; | |
1214 #endif | |
1215 } | |
1216 | |
1217 for (int ch = 0; ch < getTargetChannelCount(); ++ch) { | |
1218 for (int i = got; i < count; ++i) { | |
1219 buffer[ch][i] = 0.0; | |
1220 } | |
1221 } | |
1222 } | |
1223 | |
1224 applyAuditioningEffect(count, buffer); | |
1225 | |
1226 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1227 cout << "AudioCallbackPlaySource::getSamples: awakening thread" << endl; | |
1228 #endif | |
1229 | |
1230 m_condition.wakeAll(); | |
1231 | |
1232 return; | |
1233 } | |
1234 | |
1235 int channels = getTargetChannelCount(); | |
1236 sv_frame_t available; | |
1237 sv_frame_t fedToStretcher = 0; | |
1238 int warned = 0; | |
1239 | |
1240 // The input block for a given output is approx output / ratio, | |
1241 // but we can't predict it exactly, for an adaptive timestretcher. | |
1242 | |
1243 while ((available = ts->available()) < count) { | |
1244 | |
1245 sv_frame_t reqd = lrint(double(count - available) / ratio); | |
1246 reqd = std::max(reqd, sv_frame_t(ts->getSamplesRequired())); | |
1247 if (reqd == 0) reqd = 1; | |
1248 | |
1249 sv_frame_t got = reqd; | |
1250 | |
1251 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1252 cerr << "reqd = " <<reqd << ", channels = " << channels << ", ic = " << m_stretcherInputCount << endl; | |
1253 #endif | |
1254 | |
1255 for (int c = 0; c < channels; ++c) { | |
1256 if (c >= m_stretcherInputCount) continue; | |
1257 if (reqd > m_stretcherInputSizes[c]) { | |
1258 if (c == 0) { | |
1259 cerr << "WARNING: resizing stretcher input buffer from " << m_stretcherInputSizes[c] << " to " << (reqd * 2) << endl; | |
1260 } | |
1261 delete[] m_stretcherInputs[c]; | |
1262 m_stretcherInputSizes[c] = reqd * 2; | |
1263 m_stretcherInputs[c] = new float[m_stretcherInputSizes[c]]; | |
1264 } | |
1265 } | |
1266 | |
1267 for (int c = 0; c < channels; ++c) { | |
1268 if (c >= m_stretcherInputCount) continue; | |
1269 RingBuffer<float> *rb = getReadRingBuffer(c); | |
1270 if (rb) { | |
1271 sv_frame_t gotHere; | |
1272 if (stretchChannels == 1 && c > 0) { | |
1273 gotHere = rb->readAdding(m_stretcherInputs[0], int(got)); | |
1274 } else { | |
1275 gotHere = rb->read(m_stretcherInputs[c], int(got)); | |
1276 } | |
1277 if (gotHere < got) got = gotHere; | |
1278 | |
1279 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1280 if (c == 0) { | |
1281 SVDEBUG << "feeding stretcher: got " << gotHere | |
1282 << ", " << rb->getReadSpace() << " remain" << endl; | |
1283 } | |
1284 #endif | |
1285 | |
1286 } else { | |
1287 cerr << "WARNING: No ring buffer available for channel " << c << " in stretcher input block" << endl; | |
1288 } | |
1289 } | |
1290 | |
1291 if (got < reqd) { | |
1292 cerr << "WARNING: Read underrun in playback (" | |
1293 << got << " < " << reqd << ")" << endl; | |
1294 } | |
1295 | |
1296 ts->process(m_stretcherInputs, size_t(got), false); | |
1297 | |
1298 fedToStretcher += got; | |
1299 | |
1300 if (got == 0) break; | |
1301 | |
1302 if (ts->available() == available) { | |
1303 cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << endl; | |
1304 if (++warned == 5) break; | |
1305 } | |
1306 } | |
1307 | |
1308 ts->retrieve(buffer, size_t(count)); | |
1309 | |
1310 for (int c = stretchChannels; c < getTargetChannelCount(); ++c) { | |
1311 for (int i = 0; i < count; ++i) { | |
1312 buffer[c][i] = buffer[0][i]; | |
1313 } | |
1314 } | |
1315 | |
1316 applyAuditioningEffect(count, buffer); | |
1317 | |
1318 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1319 cout << "AudioCallbackPlaySource::getSamples [stretched]: awakening thread" << endl; | |
1320 #endif | |
1321 | |
1322 m_condition.wakeAll(); | |
1323 | |
1324 return; | |
1325 } | |
1326 | |
1327 void | |
1328 AudioCallbackPlaySource::applyAuditioningEffect(sv_frame_t count, float **buffers) | |
1329 { | |
1330 if (m_auditioningPluginBypassed) return; | |
1331 RealTimePluginInstance *plugin = m_auditioningPlugin; | |
1332 if (!plugin) return; | |
1333 | |
1334 if ((int)plugin->getAudioInputCount() != getTargetChannelCount()) { | |
1335 // cerr << "plugin input count " << plugin->getAudioInputCount() | |
1336 // << " != our channel count " << getTargetChannelCount() | |
1337 // << endl; | |
1338 return; | |
1339 } | |
1340 if ((int)plugin->getAudioOutputCount() != getTargetChannelCount()) { | |
1341 // cerr << "plugin output count " << plugin->getAudioOutputCount() | |
1342 // << " != our channel count " << getTargetChannelCount() | |
1343 // << endl; | |
1344 return; | |
1345 } | |
1346 if ((int)plugin->getBufferSize() < count) { | |
1347 // cerr << "plugin buffer size " << plugin->getBufferSize() | |
1348 // << " < our block size " << count | |
1349 // << endl; | |
1350 return; | |
1351 } | |
1352 | |
1353 float **ib = plugin->getAudioInputBuffers(); | |
1354 float **ob = plugin->getAudioOutputBuffers(); | |
1355 | |
1356 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
1357 for (int i = 0; i < count; ++i) { | |
1358 ib[c][i] = buffers[c][i]; | |
1359 } | |
1360 } | |
1361 | |
1362 plugin->run(Vamp::RealTime::zeroTime, int(count)); | |
1363 | |
1364 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
1365 for (int i = 0; i < count; ++i) { | |
1366 buffers[c][i] = ob[c][i]; | |
1367 } | |
1368 } | |
1369 } | |
1370 | |
1371 // Called from fill thread, m_playing true, mutex held | |
1372 bool | |
1373 AudioCallbackPlaySource::fillBuffers() | |
1374 { | |
1375 static float *tmp = 0; | |
1376 static sv_frame_t tmpSize = 0; | |
1377 | |
1378 sv_frame_t space = 0; | |
1379 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
1380 RingBuffer<float> *wb = getWriteRingBuffer(c); | |
1381 if (wb) { | |
1382 sv_frame_t spaceHere = wb->getWriteSpace(); | |
1383 if (c == 0 || spaceHere < space) space = spaceHere; | |
1384 } | |
1385 } | |
1386 | |
1387 if (space == 0) { | |
1388 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1389 cout << "AudioCallbackPlaySourceFillThread: no space to fill" << endl; | |
1390 #endif | |
1391 return false; | |
1392 } | |
1393 | |
1394 sv_frame_t f = m_writeBufferFill; | |
1395 | |
1396 bool readWriteEqual = (m_readBuffers == m_writeBuffers); | |
1397 | |
1398 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1399 if (!readWriteEqual) { | |
1400 cout << "AudioCallbackPlaySourceFillThread: note read buffers != write buffers" << endl; | |
1401 } | |
1402 cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << endl; | |
1403 #endif | |
1404 | |
1405 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1406 cout << "buffered to " << f << " already" << endl; | |
1407 #endif | |
1408 | |
1409 bool resample = (getSourceSampleRate() != getTargetSampleRate()); | |
1410 | |
1411 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1412 cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << endl; | |
1413 #endif | |
1414 | |
1415 int channels = getTargetChannelCount(); | |
1416 | |
1417 sv_frame_t orig = space; | |
1418 sv_frame_t got = 0; | |
1419 | |
1420 static float **bufferPtrs = 0; | |
1421 static int bufferPtrCount = 0; | |
1422 | |
1423 if (bufferPtrCount < channels) { | |
1424 if (bufferPtrs) delete[] bufferPtrs; | |
1425 bufferPtrs = new float *[channels]; | |
1426 bufferPtrCount = channels; | |
1427 } | |
1428 | |
1429 sv_frame_t generatorBlockSize = m_audioGenerator->getBlockSize(); | |
1430 | |
1431 if (resample && !m_converter) { | |
1432 static bool warned = false; | |
1433 if (!warned) { | |
1434 cerr << "WARNING: sample rates differ, but no converter available!" << endl; | |
1435 warned = true; | |
1436 } | |
1437 } | |
1438 | |
1439 if (resample && m_converter) { | |
1440 | |
1441 double ratio = | |
1442 double(getTargetSampleRate()) / double(getSourceSampleRate()); | |
1443 orig = sv_frame_t(double(orig) / ratio + 0.1); | |
1444 | |
1445 // orig must be a multiple of generatorBlockSize | |
1446 orig = (orig / generatorBlockSize) * generatorBlockSize; | |
1447 if (orig == 0) return false; | |
1448 | |
1449 sv_frame_t work = std::max(orig, space); | |
1450 | |
1451 // We only allocate one buffer, but we use it in two halves. | |
1452 // We place the non-interleaved values in the second half of | |
1453 // the buffer (orig samples for channel 0, orig samples for | |
1454 // channel 1 etc), and then interleave them into the first | |
1455 // half of the buffer. Then we resample back into the second | |
1456 // half (interleaved) and de-interleave the results back to | |
1457 // the start of the buffer for insertion into the ringbuffers. | |
1458 // What a faff -- especially as we've already de-interleaved | |
1459 // the audio data from the source file elsewhere before we | |
1460 // even reach this point. | |
1461 | |
1462 if (tmpSize < channels * work * 2) { | |
1463 delete[] tmp; | |
1464 tmp = new float[channels * work * 2]; | |
1465 tmpSize = channels * work * 2; | |
1466 } | |
1467 | |
1468 float *nonintlv = tmp + channels * work; | |
1469 float *intlv = tmp; | |
1470 float *srcout = tmp + channels * work; | |
1471 | |
1472 for (int c = 0; c < channels; ++c) { | |
1473 for (int i = 0; i < orig; ++i) { | |
1474 nonintlv[channels * i + c] = 0.0f; | |
1475 } | |
1476 } | |
1477 | |
1478 for (int c = 0; c < channels; ++c) { | |
1479 bufferPtrs[c] = nonintlv + c * orig; | |
1480 } | |
1481 | |
1482 got = mixModels(f, orig, bufferPtrs); // also modifies f | |
1483 | |
1484 // and interleave into first half | |
1485 for (int c = 0; c < channels; ++c) { | |
1486 for (int i = 0; i < got; ++i) { | |
1487 float sample = nonintlv[c * got + i]; | |
1488 intlv[channels * i + c] = sample; | |
1489 } | |
1490 } | |
1491 | |
1492 SRC_DATA data; | |
1493 data.data_in = intlv; | |
1494 data.data_out = srcout; | |
1495 data.input_frames = long(got); | |
1496 data.output_frames = long(work); | |
1497 data.src_ratio = ratio; | |
1498 data.end_of_input = 0; | |
1499 | |
1500 int err = 0; | |
1501 | |
1502 if (m_timeStretcher && m_timeStretcher->getTimeRatio() < 0.4) { | |
1503 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1504 cout << "Using crappy converter" << endl; | |
1505 #endif | |
1506 err = src_process(m_crapConverter, &data); | |
1507 } else { | |
1508 err = src_process(m_converter, &data); | |
1509 } | |
1510 | |
1511 sv_frame_t toCopy = sv_frame_t(double(got) * ratio + 0.1); | |
1512 | |
1513 if (err) { | |
1514 cerr | |
1515 << "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: " | |
1516 << src_strerror(err) << endl; | |
1517 //!!! Then what? | |
1518 } else { | |
1519 got = data.input_frames_used; | |
1520 toCopy = data.output_frames_gen; | |
1521 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1522 cout << "Resampled " << got << " frames to " << toCopy << " frames" << endl; | |
1523 #endif | |
1524 } | |
1525 | |
1526 for (int c = 0; c < channels; ++c) { | |
1527 for (int i = 0; i < toCopy; ++i) { | |
1528 tmp[i] = srcout[channels * i + c]; | |
1529 } | |
1530 RingBuffer<float> *wb = getWriteRingBuffer(c); | |
1531 if (wb) wb->write(tmp, int(toCopy)); | |
1532 } | |
1533 | |
1534 m_writeBufferFill = f; | |
1535 if (readWriteEqual) m_readBufferFill = f; | |
1536 | |
1537 } else { | |
1538 | |
1539 // space must be a multiple of generatorBlockSize | |
1540 sv_frame_t reqSpace = space; | |
1541 space = (reqSpace / generatorBlockSize) * generatorBlockSize; | |
1542 if (space == 0) { | |
1543 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1544 cout << "requested fill of " << reqSpace | |
1545 << " is less than generator block size of " | |
1546 << generatorBlockSize << ", leaving it" << endl; | |
1547 #endif | |
1548 return false; | |
1549 } | |
1550 | |
1551 if (tmpSize < channels * space) { | |
1552 delete[] tmp; | |
1553 tmp = new float[channels * space]; | |
1554 tmpSize = channels * space; | |
1555 } | |
1556 | |
1557 for (int c = 0; c < channels; ++c) { | |
1558 | |
1559 bufferPtrs[c] = tmp + c * space; | |
1560 | |
1561 for (int i = 0; i < space; ++i) { | |
1562 tmp[c * space + i] = 0.0f; | |
1563 } | |
1564 } | |
1565 | |
1566 sv_frame_t got = mixModels(f, space, bufferPtrs); // also modifies f | |
1567 | |
1568 for (int c = 0; c < channels; ++c) { | |
1569 | |
1570 RingBuffer<float> *wb = getWriteRingBuffer(c); | |
1571 if (wb) { | |
1572 int actual = wb->write(bufferPtrs[c], int(got)); | |
1573 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1574 cout << "Wrote " << actual << " samples for ch " << c << ", now " | |
1575 << wb->getReadSpace() << " to read" | |
1576 << endl; | |
1577 #endif | |
1578 if (actual < got) { | |
1579 cerr << "WARNING: Buffer overrun in channel " << c | |
1580 << ": wrote " << actual << " of " << got | |
1581 << " samples" << endl; | |
1582 } | |
1583 } | |
1584 } | |
1585 | |
1586 m_writeBufferFill = f; | |
1587 if (readWriteEqual) m_readBufferFill = f; | |
1588 | |
1589 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1590 cout << "Read buffer fill is now " << m_readBufferFill << endl; | |
1591 #endif | |
1592 | |
1593 //!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples | |
1594 } | |
1595 | |
1596 return true; | |
1597 } | |
1598 | |
1599 sv_frame_t | |
1600 AudioCallbackPlaySource::mixModels(sv_frame_t &frame, sv_frame_t count, float **buffers) | |
1601 { | |
1602 sv_frame_t processed = 0; | |
1603 sv_frame_t chunkStart = frame; | |
1604 sv_frame_t chunkSize = count; | |
1605 sv_frame_t selectionSize = 0; | |
1606 sv_frame_t nextChunkStart = chunkStart + chunkSize; | |
1607 | |
1608 bool looping = m_viewManager->getPlayLoopMode(); | |
1609 bool constrained = (m_viewManager->getPlaySelectionMode() && | |
1610 !m_viewManager->getSelections().empty()); | |
1611 | |
1612 static float **chunkBufferPtrs = 0; | |
1613 static int chunkBufferPtrCount = 0; | |
1614 int channels = getTargetChannelCount(); | |
1615 | |
1616 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1617 cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << endl; | |
1618 #endif | |
1619 | |
1620 if (chunkBufferPtrCount < channels) { | |
1621 if (chunkBufferPtrs) delete[] chunkBufferPtrs; | |
1622 chunkBufferPtrs = new float *[channels]; | |
1623 chunkBufferPtrCount = channels; | |
1624 } | |
1625 | |
1626 for (int c = 0; c < channels; ++c) { | |
1627 chunkBufferPtrs[c] = buffers[c]; | |
1628 } | |
1629 | |
1630 while (processed < count) { | |
1631 | |
1632 chunkSize = count - processed; | |
1633 nextChunkStart = chunkStart + chunkSize; | |
1634 selectionSize = 0; | |
1635 | |
1636 sv_frame_t fadeIn = 0, fadeOut = 0; | |
1637 | |
1638 if (constrained) { | |
1639 | |
1640 sv_frame_t rChunkStart = | |
1641 m_viewManager->alignPlaybackFrameToReference(chunkStart); | |
1642 | |
1643 Selection selection = | |
1644 m_viewManager->getContainingSelection(rChunkStart, true); | |
1645 | |
1646 if (selection.isEmpty()) { | |
1647 if (looping) { | |
1648 selection = *m_viewManager->getSelections().begin(); | |
1649 chunkStart = m_viewManager->alignReferenceToPlaybackFrame | |
1650 (selection.getStartFrame()); | |
1651 fadeIn = 50; | |
1652 } | |
1653 } | |
1654 | |
1655 if (selection.isEmpty()) { | |
1656 | |
1657 chunkSize = 0; | |
1658 nextChunkStart = chunkStart; | |
1659 | |
1660 } else { | |
1661 | |
1662 sv_frame_t sf = m_viewManager->alignReferenceToPlaybackFrame | |
1663 (selection.getStartFrame()); | |
1664 sv_frame_t ef = m_viewManager->alignReferenceToPlaybackFrame | |
1665 (selection.getEndFrame()); | |
1666 | |
1667 selectionSize = ef - sf; | |
1668 | |
1669 if (chunkStart < sf) { | |
1670 chunkStart = sf; | |
1671 fadeIn = 50; | |
1672 } | |
1673 | |
1674 nextChunkStart = chunkStart + chunkSize; | |
1675 | |
1676 if (nextChunkStart >= ef) { | |
1677 nextChunkStart = ef; | |
1678 fadeOut = 50; | |
1679 } | |
1680 | |
1681 chunkSize = nextChunkStart - chunkStart; | |
1682 } | |
1683 | |
1684 } else if (looping && m_lastModelEndFrame > 0) { | |
1685 | |
1686 if (chunkStart >= m_lastModelEndFrame) { | |
1687 chunkStart = 0; | |
1688 } | |
1689 if (chunkSize > m_lastModelEndFrame - chunkStart) { | |
1690 chunkSize = m_lastModelEndFrame - chunkStart; | |
1691 } | |
1692 nextChunkStart = chunkStart + chunkSize; | |
1693 } | |
1694 | |
1695 // cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << endl; | |
1696 | |
1697 if (!chunkSize) { | |
1698 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1699 cout << "Ending selection playback at " << nextChunkStart << endl; | |
1700 #endif | |
1701 // We need to maintain full buffers so that the other | |
1702 // thread can tell where it's got to in the playback -- so | |
1703 // return the full amount here | |
1704 frame = frame + count; | |
1705 return count; | |
1706 } | |
1707 | |
1708 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1709 cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << endl; | |
1710 #endif | |
1711 | |
1712 if (selectionSize < 100) { | |
1713 fadeIn = 0; | |
1714 fadeOut = 0; | |
1715 } else if (selectionSize < 300) { | |
1716 if (fadeIn > 0) fadeIn = 10; | |
1717 if (fadeOut > 0) fadeOut = 10; | |
1718 } | |
1719 | |
1720 if (fadeIn > 0) { | |
1721 if (processed * 2 < fadeIn) { | |
1722 fadeIn = processed * 2; | |
1723 } | |
1724 } | |
1725 | |
1726 if (fadeOut > 0) { | |
1727 if ((count - processed - chunkSize) * 2 < fadeOut) { | |
1728 fadeOut = (count - processed - chunkSize) * 2; | |
1729 } | |
1730 } | |
1731 | |
1732 for (std::set<Model *>::iterator mi = m_models.begin(); | |
1733 mi != m_models.end(); ++mi) { | |
1734 | |
1735 (void) m_audioGenerator->mixModel(*mi, chunkStart, | |
1736 chunkSize, chunkBufferPtrs, | |
1737 fadeIn, fadeOut); | |
1738 } | |
1739 | |
1740 for (int c = 0; c < channels; ++c) { | |
1741 chunkBufferPtrs[c] += chunkSize; | |
1742 } | |
1743 | |
1744 processed += chunkSize; | |
1745 chunkStart = nextChunkStart; | |
1746 } | |
1747 | |
1748 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1749 cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << endl; | |
1750 #endif | |
1751 | |
1752 frame = nextChunkStart; | |
1753 return processed; | |
1754 } | |
1755 | |
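// Swap the freshly filled write-side ring buffers in as the read-side
// buffers, scheduling the old read set for scavenging. The two sets
// differ only after clearRingBuffers() has rebuilt the write side, and
// we hold off until the write side has enough data to be read from.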
1756 void | |
1757 AudioCallbackPlaySource::unifyRingBuffers() | |
1758 { | |
1759 if (m_readBuffers == m_writeBuffers) return; | |
1760 | |
1761 // only unify if there will be something to read | |
1762 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
1763 RingBuffer<float> *wb = getWriteRingBuffer(c); | |
1764 if (wb) { | |
1765 if (wb->getReadSpace() < m_blockSize * 2) { | |
1766 if ((m_writeBufferFill + m_blockSize * 2) < | |
1767 m_lastModelEndFrame) { | |
1768 // OK, we don't have enough and there's more to | |
1769 // read -- don't unify until we can do better | |
1770 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1771 SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: Not unifying: write buffer has less (" << wb->getReadSpace() << ") than " << m_blockSize*2 << " to read and write buffer fill (" << m_writeBufferFill << ") is not close to end frame (" << m_lastModelEndFrame << ")" << endl; | |
1772 #endif | |
1773 return; | |
1774 } | |
1775 } | |
1776 break; | |
1777 } | |
1778 } | |
1779 | |
1780 sv_frame_t rf = m_readBufferFill; | |
1781 RingBuffer<float> *rb = getReadRingBuffer(0); | |
1782 if (rb) { | |
1783 int rs = rb->getReadSpace(); | |
1784 //!!! incorrect when in non-contiguous selection, see comments elsewhere | |
1785 // cout << "rs = " << rs << endl; | |
1786 if (rs < rf) rf -= rs; | |
1787 else rf = 0; | |
1788 } | |
1789 | |
1790 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1791 SVDEBUG << "AudioCallbackPlaySource::unifyRingBuffers: m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << endl; | |
1792 #endif | |
1793 | |
1794 sv_frame_t wf = m_writeBufferFill; | |
1795 sv_frame_t skip = 0; | |
1796 for (int c = 0; c < getTargetChannelCount(); ++c) { | |
1797 RingBuffer<float> *wb = getWriteRingBuffer(c); | |
1798 if (wb) { | |
1799 if (c == 0) { | |
1800 | |
1801 int wrs = wb->getReadSpace(); | |
1802 // cout << "wrs = " << wrs << endl; | |
1803 | |
1804 if (wrs < wf) wf -= wrs; | |
1805 else wf = 0; | |
1806 // cout << "wf = " << wf << endl; | |
1807 | |
1808 if (wf < rf) skip = rf - wf; | |
1809 if (skip == 0) break; | |
1810 } | |
1811 | |
1812 // cout << "skipping " << skip << endl; | |
1813 wb->skip(int(skip)); | |
1814 } | |
1815 } | |
1816 | |
1817 m_bufferScavenger.claim(m_readBuffers); | |
1818 m_readBuffers = m_writeBuffers; | |
1819 m_readBufferFill = m_writeBufferFill; | |
1820 #ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING | |
1821 cerr << "unified" << endl; | |
1822 #endif | |
1823 } | |
1824 | |
1825 void | |
1826 AudioCallbackPlaySource::FillThread::run() | |
1827 { | |
1828 AudioCallbackPlaySource &s(m_source); | |
1829 | |
1830 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1831 cout << "AudioCallbackPlaySourceFillThread starting" << endl; | |
1832 #endif | |
1833 | |
1834 s.m_mutex.lock(); | |
1835 | |
1836 bool previouslyPlaying = s.m_playing; | |
1837 bool work = false; | |
1838 | |
1839 while (!s.m_exiting) { | |
1840 | |
1841 s.unifyRingBuffers(); | |
1842 s.m_bufferScavenger.scavenge(); | |
1843 s.m_pluginScavenger.scavenge(); | |
1844 | |
1845 if (work && s.m_playing && s.getSourceSampleRate()) { | |
1846 | |
1847 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1848 cout << "AudioCallbackPlaySourceFillThread: not waiting" << endl; | |
1849 #endif | |
1850 | |
1851 s.m_mutex.unlock(); | |
1852 s.m_mutex.lock(); | |
1853 | |
1854 } else { | |
1855 | |
1856 double ms = 100; | |
1857 if (s.getSourceSampleRate() > 0) { | |
1858 ms = double(s.m_ringBufferSize) / s.getSourceSampleRate() * 1000.0; | |
1859 } | |
1860 | |
1861 if (s.m_playing) ms /= 10; | |
1862 | |
1863 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1864 if (!s.m_playing) cout << endl; | |
1865 cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << endl; | |
1866 #endif | |
1867 | |
1868 s.m_condition.wait(&s.m_mutex, int(ms)); | |
1869 } | |
1870 | |
1871 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1872 cout << "AudioCallbackPlaySourceFillThread: awoken" << endl; | |
1873 #endif | |
1874 | |
1875 work = false; | |
1876 | |
1877 if (!s.getSourceSampleRate()) { | |
1878 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1879 cout << "AudioCallbackPlaySourceFillThread: source sample rate is zero" << endl; | |
1880 #endif | |
1881 continue; | |
1882 } | |
1883 | |
1884 bool playing = s.m_playing; | |
1885 | |
1886 if (playing && !previouslyPlaying) { | |
1887 #ifdef DEBUG_AUDIO_PLAY_SOURCE | |
1888 cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << endl; | |
1889 #endif | |
1890 for (int c = 0; c < s.getTargetChannelCount(); ++c) { | |
1891 RingBuffer<float> *rb = s.getReadRingBuffer(c); | |
1892 if (rb) rb->reset(); | |
1893 } | |
1894 } | |
1895 previouslyPlaying = playing; | |
1896 | |
1897 work = s.fillBuffers(); | |
1898 } | |
1899 | |
1900 s.m_mutex.unlock(); | |
1901 } | |
1902 |
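
For reference, a minimal sketch of how a callback-style playback target might drive this class. It uses only methods defined in the file above; the helper function names and the 48000/1024/4096 figures are illustrative assumptions, not the bqaudioio API.

    #include "AudioCallbackPlaySource.h"

    // Hypothetical glue code: configure the source when the audio
    // device is opened, then start playback from frame 0.
    void configurePlayback(AudioCallbackPlaySource *source)
    {
        source->setSystemPlaybackSampleRate(48000); // device sample rate
        source->setSystemPlaybackBlockSize(1024);   // frames per callback
        source->setSystemPlaybackLatency(4096);     // device latency in frames
        source->play(0);
    }

    // Hypothetical audio-thread render callback, called once per
    // hardware block: the source fills one non-interleaved float
    // buffer per target channel, zero-padding if its ring buffers
    // run dry.
    void renderBlock(AudioCallbackPlaySource *source,
                     float **channelBuffers, int frameCount)
    {
        source->getSourceSamples(frameCount, channelBuffers);
    }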