svapp: comparison audioio/AudioCallbackPlaySource.cpp @ 43:3c5756fb6a68
* Move some things around to facilitate plundering libraries for other
applications without needing to duplicate so much code.
    sv/osc -> data/osc
    sv/audioio -> audioio
    sv/transform -> plugin/transform
    sv/document -> document (will rename to framework in next commit)
author    Chris Cannam
date      Wed, 24 Oct 2007 16:34:31 +0000
parents
children  eb596ef12041
compared revisions: 42:0619006a1ee3 | 43:3c5756fb6a68
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam and QMUL.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioCallbackPlaySource.h"

#include "AudioGenerator.h"

#include "data/model/Model.h"
#include "view/ViewManager.h"
#include "base/PlayParameterRepository.h"
#include "base/Preferences.h"
#include "data/model/DenseTimeValueModel.h"
#include "data/model/WaveFileModel.h"
#include "data/model/SparseOneDimensionalModel.h"
#include "plugin/RealTimePluginInstance.h"
#include "PhaseVocoderTimeStretcher.h"

#include <iostream>
#include <cassert>

//#define DEBUG_AUDIO_PLAY_SOURCE 1
//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1

const size_t AudioCallbackPlaySource::m_ringBufferSize = 131071;
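// (131071 frames is 2^17 - 1, i.e. roughly 2.97 seconds of buffer per
// channel at 44.1kHz.)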

AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManager *manager) :
    m_viewManager(manager),
    m_audioGenerator(new AudioGenerator()),
    m_readBuffers(0),
    m_writeBuffers(0),
    m_readBufferFill(0),
    m_writeBufferFill(0),
    m_bufferScavenger(1),
    m_sourceChannelCount(0),
    m_blockSize(1024),
    m_sourceSampleRate(0),
    m_targetSampleRate(0),
    m_playLatency(0),
    m_playing(false),
    m_exiting(false),
    m_lastModelEndFrame(0),
    m_outputLeft(0.0),
    m_outputRight(0.0),
    m_auditioningPlugin(0),
    m_auditioningPluginBypassed(false),
    m_timeStretcher(0),
    m_fillThread(0),
    m_converter(0),
    m_crapConverter(0),
    m_resampleQuality(Preferences::getInstance()->getResampleQuality())
{
    m_viewManager->setAudioPlaySource(this);

    connect(m_viewManager, SIGNAL(selectionChanged()),
            this, SLOT(selectionChanged()));
    connect(m_viewManager, SIGNAL(playLoopModeChanged()),
            this, SLOT(playLoopModeChanged()));
    connect(m_viewManager, SIGNAL(playSelectionModeChanged()),
            this, SLOT(playSelectionModeChanged()));

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playParametersChanged(PlayParameters *)),
            this, SLOT(playParametersChanged(PlayParameters *)));

    connect(Preferences::getInstance(),
            SIGNAL(propertyChanged(PropertyContainer::PropertyName)),
            this, SLOT(preferenceChanged(PropertyContainer::PropertyName)));
}

AudioCallbackPlaySource::~AudioCallbackPlaySource()
{
    m_exiting = true;

    if (m_fillThread) {
        m_condition.wakeAll();
        m_fillThread->wait();
        delete m_fillThread;
    }

    clearModels();

    if (m_readBuffers != m_writeBuffers) {
        delete m_readBuffers;
    }

    delete m_writeBuffers;

    delete m_audioGenerator;

    m_bufferScavenger.scavenge(true);
    m_pluginScavenger.scavenge(true);
    m_timeStretcherScavenger.scavenge(true);
}

void
AudioCallbackPlaySource::addModel(Model *model)
{
    if (m_models.find(model) != m_models.end()) return;

    bool canPlay = m_audioGenerator->addModel(model);

    m_mutex.lock();

    m_models.insert(model);
    if (model->getEndFrame() > m_lastModelEndFrame) {
        m_lastModelEndFrame = model->getEndFrame();
    }

    bool buffersChanged = false, srChanged = false;

    size_t modelChannels = 1;
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) modelChannels = dtvm->getChannelCount();
    if (modelChannels > m_sourceChannelCount) {
        m_sourceChannelCount = modelChannels;
    }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "Adding model with " << modelChannels << " channels " << std::endl;
#endif

    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();
        srChanged = true;

    } else if (model->getSampleRate() != m_sourceSampleRate) {

        // If this is a dense time-value model and we have no other, we
        // can just switch to this model's sample rate

        if (dtvm) {

            bool conflicting = false;

            for (std::set<Model *>::const_iterator i = m_models.begin();
                 i != m_models.end(); ++i) {
                // Only wave file models can be considered conflicting --
                // writable wave file models are derived and we shouldn't
                // take their rates into account.  Also, don't give any
                // particular weight to a file that's already playing at
                // the wrong rate anyway
                WaveFileModel *wfm = dynamic_cast<WaveFileModel *>(*i);
                if (wfm && wfm != dtvm &&
                    wfm->getSampleRate() != model->getSampleRate() &&
                    wfm->getSampleRate() == m_sourceSampleRate) {
                    std::cerr << "AudioCallbackPlaySource::addModel: Conflicting wave file model " << *i << " found" << std::endl;
                    conflicting = true;
                    break;
                }
            }

            if (conflicting) {

                std::cerr << "AudioCallbackPlaySource::addModel: ERROR: "
                          << "New model sample rate does not match" << std::endl
                          << "existing model(s) (new " << model->getSampleRate()
                          << " vs " << m_sourceSampleRate
                          << "), playback will be wrong"
                          << std::endl;

                emit sampleRateMismatch(model->getSampleRate(),
                                        m_sourceSampleRate,
                                        false);
            } else {
                m_sourceSampleRate = model->getSampleRate();
                srChanged = true;
            }
        }
    }

    if (!m_writeBuffers || (m_writeBuffers->size() < getTargetChannelCount())) {
        clearRingBuffers(true, getTargetChannelCount());
        buffersChanged = true;
    } else {
        if (canPlay) clearRingBuffers(true);
    }

    if (buffersChanged || srChanged) {
        if (m_converter) {
            src_delete(m_converter);
            src_delete(m_crapConverter);
            m_converter = 0;
            m_crapConverter = 0;
        }
    }

    m_mutex.unlock();

    m_audioGenerator->setTargetChannelCount(getTargetChannelCount());

    if (!m_fillThread) {
        m_fillThread = new FillThread(*this);
        m_fillThread->start();
    }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySource::addModel: now have " << m_models.size() << " model(s) -- emitting modelReplaced" << std::endl;
#endif

    if (buffersChanged || srChanged) {
        emit modelReplaced();
    }

    connect(model, SIGNAL(modelChanged(size_t, size_t)),
            this, SLOT(modelChanged(size_t, size_t)));

    m_condition.wakeAll();
}

void
AudioCallbackPlaySource::modelChanged(size_t startFrame, size_t endFrame)
{
#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "AudioCallbackPlaySource::modelChanged(" << startFrame << "," << endFrame << ")" << std::endl;
#endif
    if (endFrame > m_lastModelEndFrame) m_lastModelEndFrame = endFrame;
}

void
AudioCallbackPlaySource::removeModel(Model *model)
{
    m_mutex.lock();

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySource::removeModel(" << model << ")" << std::endl;
#endif

    disconnect(model, SIGNAL(modelChanged(size_t, size_t)),
               this, SLOT(modelChanged(size_t, size_t)));

    m_models.erase(model);

    if (m_models.empty()) {
        if (m_converter) {
            src_delete(m_converter);
            src_delete(m_crapConverter);
            m_converter = 0;
            m_crapConverter = 0;
        }
        m_sourceSampleRate = 0;
    }

    size_t lastEnd = 0;
    for (std::set<Model *>::const_iterator i = m_models.begin();
         i != m_models.end(); ++i) {
        // std::cout << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << std::endl;
        if ((*i)->getEndFrame() > lastEnd) lastEnd = (*i)->getEndFrame();
        // std::cout << "(done, lastEnd now " << lastEnd << ")" << std::endl;
    }
    m_lastModelEndFrame = lastEnd;

    m_mutex.unlock();

    m_audioGenerator->removeModel(model);

    clearRingBuffers();
}

void
AudioCallbackPlaySource::clearModels()
{
    m_mutex.lock();

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySource::clearModels()" << std::endl;
#endif

    m_models.clear();

    if (m_converter) {
        src_delete(m_converter);
        src_delete(m_crapConverter);
        m_converter = 0;
        m_crapConverter = 0;
    }

    m_lastModelEndFrame = 0;

    m_sourceSampleRate = 0;

    m_mutex.unlock();

    m_audioGenerator->clearModels();
}

void
AudioCallbackPlaySource::clearRingBuffers(bool haveLock, size_t count)
{
    if (!haveLock) m_mutex.lock();

    if (count == 0) {
        if (m_writeBuffers) count = m_writeBuffers->size();
    }

    size_t sf = m_readBufferFill;
    RingBuffer<float> *rb = getReadRingBuffer(0);
    if (rb) {
        //!!! This is incorrect if we're in a non-contiguous selection
        //Same goes for all related code (subtracting the read space
        //from the fill frame to try to establish where the effective
        //pre-resample/timestretch read pointer is)
        size_t rs = rb->getReadSpace();
        if (rs < sf) sf -= rs;
        else sf = 0;
    }
    m_writeBufferFill = sf;

    if (m_readBuffers != m_writeBuffers) {
        delete m_writeBuffers;
    }

    m_writeBuffers = new RingBufferVector;

    for (size_t i = 0; i < count; ++i) {
        m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize));
    }

    // std::cout << "AudioCallbackPlaySource::clearRingBuffers: Created "
    //           << count << " write buffers" << std::endl;

    if (!haveLock) {
        m_mutex.unlock();
    }
}

void
AudioCallbackPlaySource::play(size_t startFrame)
{
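    // In play-selection mode, snap a start frame that falls outside the
    // selected regions to the start of the first selection: this covers
    // both a start before the first selection and a start at or beyond
    // the end of the last one.  Outside selection mode, wrap to 0 if we
    // are already at or past the end of the models.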
    if (m_viewManager->getPlaySelectionMode() &&
        !m_viewManager->getSelections().empty()) {
        MultiSelection::SelectionList selections = m_viewManager->getSelections();
        MultiSelection::SelectionList::iterator i = selections.begin();
        if (i != selections.end()) {
            if (startFrame < i->getStartFrame()) {
                startFrame = i->getStartFrame();
            } else {
                MultiSelection::SelectionList::iterator j = selections.end();
                --j;
                if (startFrame >= j->getEndFrame()) {
                    startFrame = i->getStartFrame();
                }
            }
        }
    } else {
        if (startFrame >= m_lastModelEndFrame) {
            startFrame = 0;
        }
    }

    // The fill thread will automatically empty its buffers before
    // starting again if we have not so far been playing, but not if
    // we're just re-seeking.

    m_mutex.lock();
    if (m_playing) {
        m_readBufferFill = m_writeBufferFill = startFrame;
        if (m_readBuffers) {
            for (size_t c = 0; c < getTargetChannelCount(); ++c) {
                RingBuffer<float> *rb = getReadRingBuffer(c);
                if (rb) rb->reset();
            }
        }
        if (m_converter) src_reset(m_converter);
        if (m_crapConverter) src_reset(m_crapConverter);
    } else {
        if (m_converter) src_reset(m_converter);
        if (m_crapConverter) src_reset(m_crapConverter);
        m_readBufferFill = m_writeBufferFill = startFrame;
    }
    m_mutex.unlock();

    m_audioGenerator->reset();

    bool changed = !m_playing;
    m_playing = true;
    m_condition.wakeAll();
    if (changed) emit playStatusChanged(m_playing);
}

void
AudioCallbackPlaySource::stop()
{
    bool changed = m_playing;
    m_playing = false;
    m_condition.wakeAll();
    if (changed) emit playStatusChanged(m_playing);
}

void
AudioCallbackPlaySource::selectionChanged()
{
    if (m_viewManager->getPlaySelectionMode()) {
        clearRingBuffers();
    }
}

void
AudioCallbackPlaySource::playLoopModeChanged()
{
    clearRingBuffers();
}

void
AudioCallbackPlaySource::playSelectionModeChanged()
{
    if (!m_viewManager->getSelections().empty()) {
        clearRingBuffers();
    }
}

void
AudioCallbackPlaySource::playParametersChanged(PlayParameters *)
{
    clearRingBuffers();
}

void
AudioCallbackPlaySource::preferenceChanged(PropertyContainer::PropertyName n)
{
    if (n == "Resample Quality") {
        setResampleQuality(Preferences::getInstance()->getResampleQuality());
    }
}

void
AudioCallbackPlaySource::audioProcessingOverload()
{
    RealTimePluginInstance *ap = m_auditioningPlugin;
    if (ap && m_playing && !m_auditioningPluginBypassed) {
        m_auditioningPluginBypassed = true;
        emit audioOverloadPluginDisabled();
    }
}

void
AudioCallbackPlaySource::setTargetBlockSize(size_t size)
{
    // std::cout << "AudioCallbackPlaySource::setTargetBlockSize() -> " << size << std::endl;
    assert(size < m_ringBufferSize);
    m_blockSize = size;
}

size_t
AudioCallbackPlaySource::getTargetBlockSize() const
{
    // std::cout << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << std::endl;
    return m_blockSize;
}

void
AudioCallbackPlaySource::setTargetPlayLatency(size_t latency)
{
    m_playLatency = latency;
}

size_t
AudioCallbackPlaySource::getTargetPlayLatency() const
{
    return m_playLatency;
}

size_t
AudioCallbackPlaySource::getCurrentPlayingFrame()
{
    bool resample = false;
    double ratio = 1.0;

    if (getSourceSampleRate() != getTargetSampleRate()) {
        resample = true;
        ratio = double(getSourceSampleRate()) / double(getTargetSampleRate());
    }
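    // (For example: with a 48kHz source played through a 44.1kHz target,
    // ratio = 48000/44100, about 1.088, so 4096 buffered target-rate
    // frames below correspond to roughly 4458 source-rate frames.)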

    size_t readSpace = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *rb = getReadRingBuffer(c);
        if (rb) {
            size_t spaceHere = rb->getReadSpace();
            if (c == 0 || spaceHere < readSpace) readSpace = spaceHere;
        }
    }

    if (resample) {
        readSpace = size_t(readSpace * ratio + 0.1);
    }

    size_t latency = m_playLatency;
    if (resample) latency = size_t(m_playLatency * ratio + 0.1);

    PhaseVocoderTimeStretcher *timeStretcher = m_timeStretcher;
    if (timeStretcher) {
        latency += timeStretcher->getProcessingLatency();
    }

    latency += readSpace;
    size_t bufferedFrame = m_readBufferFill;

    bool looping = m_viewManager->getPlayLoopMode();
    bool constrained = (m_viewManager->getPlaySelectionMode() &&
                        !m_viewManager->getSelections().empty());

    size_t framePlaying = bufferedFrame;

    if (looping && !constrained) {
        while (framePlaying < latency) framePlaying += m_lastModelEndFrame;
    }

    if (framePlaying > latency) framePlaying -= latency;
    else framePlaying = 0;

    if (!constrained) {
        if (!looping && framePlaying > m_lastModelEndFrame) {
            framePlaying = m_lastModelEndFrame;
            stop();
        }
        return framePlaying;
    }

    MultiSelection::SelectionList selections = m_viewManager->getSelections();
    MultiSelection::SelectionList::const_iterator i;

    // i = selections.begin();
    // size_t rangeStart = i->getStartFrame();

    i = selections.end();
    --i;
    size_t rangeEnd = i->getEndFrame();

    for (i = selections.begin(); i != selections.end(); ++i) {
        if (i->contains(bufferedFrame)) break;
    }

    size_t f = bufferedFrame;

    // std::cout << "getCurrentPlayingFrame: f=" << f << ", latency=" << latency << ", rangeEnd=" << rangeEnd << std::endl;

    if (i == selections.end()) {
        --i;
        if (i->getEndFrame() + latency < f) {
            // std::cout << "framePlaying = " << framePlaying << ", rangeEnd = " << rangeEnd << std::endl;

            if (!looping && (framePlaying > rangeEnd)) {
                // std::cout << "STOPPING" << std::endl;
                stop();
                return rangeEnd;
            } else {
                return framePlaying;
            }
        } else {
            // std::cout << "latency <- " << latency << "-(" << f << "-" << i->getEndFrame() << ")" << std::endl;
            latency -= (f - i->getEndFrame());
            f = i->getEndFrame();
        }
    }

    // std::cout << "i=(" << i->getStartFrame() << "," << i->getEndFrame() << ") f=" << f << ", latency=" << latency << std::endl;

    while (latency > 0) {
        size_t offset = f - i->getStartFrame();
        if (offset >= latency) {
            if (f > latency) {
                framePlaying = f - latency;
            } else {
                framePlaying = 0;
            }
            break;
        } else {
            if (i == selections.begin()) {
                if (looping) {
                    i = selections.end();
                }
            }
            latency -= offset;
            --i;
            f = i->getEndFrame();
        }
    }

    return framePlaying;
}

void
AudioCallbackPlaySource::setOutputLevels(float left, float right)
{
    m_outputLeft = left;
    m_outputRight = right;
}

bool
AudioCallbackPlaySource::getOutputLevels(float &left, float &right)
{
    left = m_outputLeft;
    right = m_outputRight;
    return true;
}

void
AudioCallbackPlaySource::setTargetSampleRate(size_t sr)
{
    m_targetSampleRate = sr;
    initialiseConverter();
}

void
AudioCallbackPlaySource::initialiseConverter()
{
    m_mutex.lock();

    if (m_converter) {
        src_delete(m_converter);
        src_delete(m_crapConverter);
        m_converter = 0;
        m_crapConverter = 0;
    }

    if (getSourceSampleRate() != getTargetSampleRate()) {

        int err = 0;

        m_converter = src_new(m_resampleQuality == 2 ? SRC_SINC_BEST_QUALITY :
                              m_resampleQuality == 1 ? SRC_SINC_MEDIUM_QUALITY :
                              m_resampleQuality == 0 ? SRC_SINC_FASTEST :
                                                       SRC_SINC_MEDIUM_QUALITY,
                              getTargetChannelCount(), &err);

        if (m_converter) {
            m_crapConverter = src_new(SRC_LINEAR,
                                      getTargetChannelCount(),
                                      &err);
        }
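        // (The SRC_LINEAR converter is the cheap fallback used by
        // fillBuffers() when a very fast time stretch -- ratio below
        // 0.4 -- is active, to keep resampling cost down.)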

        if (!m_converter || !m_crapConverter) {
            std::cerr
                << "AudioCallbackPlaySource::setModel: ERROR in creating samplerate converter: "
                << src_strerror(err) << std::endl;

            if (m_converter) {
                src_delete(m_converter);
                m_converter = 0;
            }

            if (m_crapConverter) {
                src_delete(m_crapConverter);
                m_crapConverter = 0;
            }

            m_mutex.unlock();

            emit sampleRateMismatch(getSourceSampleRate(),
                                    getTargetSampleRate(),
                                    false);
        } else {

            m_mutex.unlock();

            emit sampleRateMismatch(getSourceSampleRate(),
                                    getTargetSampleRate(),
                                    true);
        }
    } else {
        m_mutex.unlock();
    }
}

void
AudioCallbackPlaySource::setResampleQuality(int q)
{
    if (q == m_resampleQuality) return;
    m_resampleQuality = q;

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "AudioCallbackPlaySource::setResampleQuality: setting to "
              << m_resampleQuality << std::endl;
#endif

    initialiseConverter();
}

void
AudioCallbackPlaySource::setAuditioningPlugin(RealTimePluginInstance *plugin)
{
    RealTimePluginInstance *formerPlugin = m_auditioningPlugin;
    m_auditioningPlugin = plugin;
    m_auditioningPluginBypassed = false;
    if (formerPlugin) m_pluginScavenger.claim(formerPlugin);
}
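
// (setAuditioningPlugin, like setTimeStretch below, swaps the pointer in
// without taking the mutex and hands the previous instance to a scavenger
// rather than deleting it, since a concurrent getSourceSamples call may
// still be using it.)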

void
AudioCallbackPlaySource::setSoloModelSet(std::set<Model *> s)
{
    m_audioGenerator->setSoloModelSet(s);
    clearRingBuffers();
}

void
AudioCallbackPlaySource::clearSoloModelSet()
{
    m_audioGenerator->clearSoloModelSet();
    clearRingBuffers();
}

size_t
AudioCallbackPlaySource::getTargetSampleRate() const
{
    if (m_targetSampleRate) return m_targetSampleRate;
    else return getSourceSampleRate();
}

size_t
AudioCallbackPlaySource::getSourceChannelCount() const
{
    return m_sourceChannelCount;
}

size_t
AudioCallbackPlaySource::getTargetChannelCount() const
{
    if (m_sourceChannelCount < 2) return 2;
    return m_sourceChannelCount;
}

size_t
AudioCallbackPlaySource::getSourceSampleRate() const
{
    return m_sourceSampleRate;
}

void
AudioCallbackPlaySource::setTimeStretch(float factor, bool sharpen, bool mono)
{
    // Avoid locks -- create, assign, mark old one for scavenging
    // later (as a call to getSourceSamples may still be using it)

    PhaseVocoderTimeStretcher *existingStretcher = m_timeStretcher;

    size_t channels = getTargetChannelCount();
    if (mono) channels = 1;

    if (existingStretcher &&
        existingStretcher->getRatio() == factor &&
        existingStretcher->getSharpening() == sharpen &&
        existingStretcher->getChannelCount() == channels) {
        return;
    }

    if (factor != 1) {

        if (existingStretcher &&
            existingStretcher->getSharpening() == sharpen &&
            existingStretcher->getChannelCount() == channels) {
            existingStretcher->setRatio(factor);
            return;
        }

        PhaseVocoderTimeStretcher *newStretcher = new PhaseVocoderTimeStretcher
            (getTargetSampleRate(),
             channels,
             factor,
             sharpen,
             getTargetBlockSize());

        m_timeStretcher = newStretcher;

    } else {
        m_timeStretcher = 0;
    }

    if (existingStretcher) {
        m_timeStretcherScavenger.claim(existingStretcher);
    }
}

size_t
AudioCallbackPlaySource::getSourceSamples(size_t count, float **buffer)
{
    if (!m_playing) {
        for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
            for (size_t i = 0; i < count; ++i) {
                buffer[ch][i] = 0.0;
            }
        }
        return 0;
    }

    // Ensure that all buffers have at least the amount of data we
    // need -- else reduce the size of our requests correspondingly

    for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {

        RingBuffer<float> *rb = getReadRingBuffer(ch);

        if (!rb) {
            std::cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
                      << "No ring buffer available for channel " << ch
                      << ", returning no data here" << std::endl;
            count = 0;
            break;
        }

        size_t rs = rb->getReadSpace();
        if (rs < count) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cerr << "WARNING: AudioCallbackPlaySource::getSourceSamples: "
                      << "Ring buffer for channel " << ch << " has only "
                      << rs << " (of " << count << ") samples available, "
                      << "reducing request size" << std::endl;
#endif
            count = rs;
        }
    }

    if (count == 0) return 0;

    PhaseVocoderTimeStretcher *ts = m_timeStretcher;

    if (!ts || ts->getRatio() == 1) {

        size_t got = 0;

        for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {

            RingBuffer<float> *rb = getReadRingBuffer(ch);

            if (rb) {

                // this is marginally more likely to leave our channels in
                // sync after a processing failure than just passing "count":
                size_t request = count;
                if (ch > 0) request = got;

                got = rb->read(buffer[ch], request);

#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
                std::cout << "AudioCallbackPlaySource::getSamples: got " << got << " (of " << count << ") samples on channel " << ch << ", signalling for more (possibly)" << std::endl;
#endif
            }

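            // Note that this zero-padding loop is nested inside the outer
            // channel loop above, and its loop variable shadows the outer
            // "ch": it re-pads every channel from the current "got" up to
            // "count" on each pass of the outer loop.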
            for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
                for (size_t i = got; i < count; ++i) {
                    buffer[ch][i] = 0.0;
                }
            }
        }

        applyAuditioningEffect(count, buffer);

        m_condition.wakeAll();
        return got;
    }

    float ratio = ts->getRatio();

    // std::cout << "ratio = " << ratio << std::endl;

    size_t channels = getTargetChannelCount();
    bool mix = (channels > 1 && ts->getChannelCount() == 1);

    size_t available;

    int warned = 0;

    // We want output blocks of e.g. 1024 (probably fixed, certainly
    // bounded).  We can provide input blocks of any size (unbounded)
    // at the timestretcher's request.  The input block for a given
    // output is approx output / ratio, but we can't predict it
    // exactly, for an adaptive timestretcher.  The stretcher will
    // need some additional buffer space.  See the time stretcher code
    // and comments.
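    // (For example, with count = 1024 output frames and a stretch ratio
    // of 2.0 -- half-speed playback -- the loop below asks for roughly
    // 512 input frames per pass; at a ratio of 0.5 it needs roughly 2048.)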

    while ((available = ts->getAvailableOutputSamples()) < count) {

        size_t reqd = lrintf((count - available) / ratio);
        reqd = std::max(reqd, ts->getRequiredInputSamples());
        if (reqd == 0) reqd = 1;

        float *ib[channels];

        size_t got = reqd;

        if (mix) {
            for (size_t c = 0; c < channels; ++c) {
                if (c == 0) ib[c] = new float[reqd]; //!!! fix -- this is a rt function
                else ib[c] = 0;
                RingBuffer<float> *rb = getReadRingBuffer(c);
                if (rb) {
                    size_t gotHere;
                    if (c > 0) gotHere = rb->readAdding(ib[0], got);
                    else gotHere = rb->read(ib[0], got);
                    if (gotHere < got) got = gotHere;
                }
            }
        } else {
            for (size_t c = 0; c < channels; ++c) {
                ib[c] = new float[reqd]; //!!! fix -- this is a rt function
                RingBuffer<float> *rb = getReadRingBuffer(c);
                if (rb) {
                    size_t gotHere = rb->read(ib[c], got);
                    if (gotHere < got) got = gotHere;
                }
            }
        }

        if (got < reqd) {
            std::cerr << "WARNING: Read underrun in playback ("
                      << got << " < " << reqd << ")" << std::endl;
        }

        ts->putInput(ib, got);

        for (size_t c = 0; c < channels; ++c) {
            delete[] ib[c];
        }

        if (got == 0) break;

        if (ts->getAvailableOutputSamples() == available) {
            std::cerr << "WARNING: AudioCallbackPlaySource::getSamples: Added " << got << " samples to time stretcher, created no new available output samples (warned = " << warned << ")" << std::endl;
            if (++warned == 5) break;
        }
    }

    ts->getOutput(buffer, count);

    if (mix) {
        for (size_t c = 1; c < channels; ++c) {
            for (size_t i = 0; i < count; ++i) {
                buffer[c][i] = buffer[0][i] / channels;
            }
        }
        for (size_t i = 0; i < count; ++i) {
            buffer[0][i] /= channels;
        }
    }

    applyAuditioningEffect(count, buffer);

    m_condition.wakeAll();

    return count;
}

void
AudioCallbackPlaySource::applyAuditioningEffect(size_t count, float **buffers)
{
    if (m_auditioningPluginBypassed) return;
    RealTimePluginInstance *plugin = m_auditioningPlugin;
    if (!plugin) return;

    if (plugin->getAudioInputCount() != getTargetChannelCount()) {
        // std::cerr << "plugin input count " << plugin->getAudioInputCount()
        //           << " != our channel count " << getTargetChannelCount()
        //           << std::endl;
        return;
    }
    if (plugin->getAudioOutputCount() != getTargetChannelCount()) {
        // std::cerr << "plugin output count " << plugin->getAudioOutputCount()
        //           << " != our channel count " << getTargetChannelCount()
        //           << std::endl;
        return;
    }
    if (plugin->getBufferSize() != count) {
        // std::cerr << "plugin buffer size " << plugin->getBufferSize()
        //           << " != our block size " << count
        //           << std::endl;
        return;
    }

    float **ib = plugin->getAudioInputBuffers();
    float **ob = plugin->getAudioOutputBuffers();

    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        for (size_t i = 0; i < count; ++i) {
            ib[c][i] = buffers[c][i];
        }
    }

    plugin->run(Vamp::RealTime::zeroTime);

    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        for (size_t i = 0; i < count; ++i) {
            buffers[c][i] = ob[c][i];
        }
    }
}

// Called from fill thread, m_playing true, mutex held
bool
AudioCallbackPlaySource::fillBuffers()
{
    static float *tmp = 0;
    static size_t tmpSize = 0;

    size_t space = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            size_t spaceHere = wb->getWriteSpace();
            if (c == 0 || spaceHere < space) space = spaceHere;
        }
    }

    if (space == 0) return false;

    size_t f = m_writeBufferFill;

    bool readWriteEqual = (m_readBuffers == m_writeBuffers);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << std::endl;
#endif

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "buffered to " << f << " already" << std::endl;
#endif

    bool resample = (getSourceSampleRate() != getTargetSampleRate());

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << std::endl;
#endif

    size_t channels = getTargetChannelCount();

    size_t orig = space;
    size_t got = 0;

    static float **bufferPtrs = 0;
    static size_t bufferPtrCount = 0;

    if (bufferPtrCount < channels) {
        if (bufferPtrs) delete[] bufferPtrs;
        bufferPtrs = new float *[channels];
        bufferPtrCount = channels;
    }

    size_t generatorBlockSize = m_audioGenerator->getBlockSize();

    if (resample && !m_converter) {
        static bool warned = false;
        if (!warned) {
            std::cerr << "WARNING: sample rates differ, but no converter available!" << std::endl;
            warned = true;
        }
    }

    if (resample && m_converter) {

        double ratio =
            double(getTargetSampleRate()) / double(getSourceSampleRate());
        orig = size_t(orig / ratio + 0.1);

        // orig must be a multiple of generatorBlockSize
        orig = (orig / generatorBlockSize) * generatorBlockSize;
        if (orig == 0) return false;

        size_t work = std::max(orig, space);

        // We only allocate one buffer, but we use it in two halves.
        // We place the non-interleaved values in the second half of
        // the buffer (orig samples for channel 0, orig samples for
        // channel 1 etc), and then interleave them into the first
        // half of the buffer.  Then we resample back into the second
        // half (interleaved) and de-interleave the results back to
        // the start of the buffer for insertion into the ringbuffers.
        // What a faff -- especially as we've already de-interleaved
        // the audio data from the source file elsewhere before we
        // even reach this point.
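        //
        // (Layout of tmp, which is channels * work * 2 floats long: the
        // second half first receives the non-interleaved mixer output and
        // later the interleaved converter output; the first half holds
        // the interleaved converter input and finally the per-channel
        // de-interleaved results written to the ring buffers.)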

        if (tmpSize < channels * work * 2) {
            delete[] tmp;
            tmp = new float[channels * work * 2];
            tmpSize = channels * work * 2;
        }

        float *nonintlv = tmp + channels * work;
        float *intlv = tmp;
        float *srcout = tmp + channels * work;

        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < orig; ++i) {
                nonintlv[channels * i + c] = 0.0f;
            }
        }

        for (size_t c = 0; c < channels; ++c) {
            bufferPtrs[c] = nonintlv + c * orig;
        }

        got = mixModels(f, orig, bufferPtrs);

        // and interleave into first half
        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < got; ++i) {
                float sample = nonintlv[c * got + i];
                intlv[channels * i + c] = sample;
            }
        }

        SRC_DATA data;
        data.data_in = intlv;
        data.data_out = srcout;
        data.input_frames = got;
        data.output_frames = work;
        data.src_ratio = ratio;
        data.end_of_input = 0;

        int err = 0;

        if (m_timeStretcher && m_timeStretcher->getRatio() < 0.4) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "Using crappy converter" << std::endl;
#endif
            err = src_process(m_crapConverter, &data);
        } else {
            err = src_process(m_converter, &data);
        }

        size_t toCopy = size_t(got * ratio + 0.1);

        if (err) {
            std::cerr
                << "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
                << src_strerror(err) << std::endl;
            //!!! Then what?
        } else {
            got = data.input_frames_used;
            toCopy = data.output_frames_gen;
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "Resampled " << got << " frames to " << toCopy << " frames" << std::endl;
#endif
        }

        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < toCopy; ++i) {
                tmp[i] = srcout[channels * i + c];
            }
            RingBuffer<float> *wb = getWriteRingBuffer(c);
            if (wb) wb->write(tmp, toCopy);
        }

        m_writeBufferFill = f;
        if (readWriteEqual) m_readBufferFill = f;

    } else {

        // space must be a multiple of generatorBlockSize
        space = (space / generatorBlockSize) * generatorBlockSize;
        if (space == 0) return false;

        if (tmpSize < channels * space) {
            delete[] tmp;
            tmp = new float[channels * space];
            tmpSize = channels * space;
        }

        for (size_t c = 0; c < channels; ++c) {

            bufferPtrs[c] = tmp + c * space;

            for (size_t i = 0; i < space; ++i) {
                tmp[c * space + i] = 0.0f;
            }
        }

        size_t got = mixModels(f, space, bufferPtrs);

        for (size_t c = 0; c < channels; ++c) {

            RingBuffer<float> *wb = getWriteRingBuffer(c);
            if (wb) {
                size_t actual = wb->write(bufferPtrs[c], got);
#ifdef DEBUG_AUDIO_PLAY_SOURCE
                std::cout << "Wrote " << actual << " samples for ch " << c << ", now "
                          << wb->getReadSpace() << " to read"
                          << std::endl;
#endif
                if (actual < got) {
                    std::cerr << "WARNING: Buffer overrun in channel " << c
                              << ": wrote " << actual << " of " << got
                              << " samples" << std::endl;
                }
            }
        }

        m_writeBufferFill = f;
        if (readWriteEqual) m_readBufferFill = f;

        //!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
    }

    return true;
}

size_t
AudioCallbackPlaySource::mixModels(size_t &frame, size_t count, float **buffers)
{
    size_t processed = 0;
    size_t chunkStart = frame;
    size_t chunkSize = count;
    size_t selectionSize = 0;
    size_t nextChunkStart = chunkStart + chunkSize;

    bool looping = m_viewManager->getPlayLoopMode();
    bool constrained = (m_viewManager->getPlaySelectionMode() &&
                        !m_viewManager->getSelections().empty());

    static float **chunkBufferPtrs = 0;
    static size_t chunkBufferPtrCount = 0;
    size_t channels = getTargetChannelCount();

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << std::endl;
#endif

    if (chunkBufferPtrCount < channels) {
        if (chunkBufferPtrs) delete[] chunkBufferPtrs;
        chunkBufferPtrs = new float *[channels];
        chunkBufferPtrCount = channels;
    }

    for (size_t c = 0; c < channels; ++c) {
        chunkBufferPtrs[c] = buffers[c];
    }

    while (processed < count) {

        chunkSize = count - processed;
        nextChunkStart = chunkStart + chunkSize;
        selectionSize = 0;

        size_t fadeIn = 0, fadeOut = 0;

        if (constrained) {

            Selection selection =
                m_viewManager->getContainingSelection(chunkStart, true);

            if (selection.isEmpty()) {
                if (looping) {
                    selection = *m_viewManager->getSelections().begin();
                    chunkStart = selection.getStartFrame();
                    fadeIn = 50;
                }
            }

            if (selection.isEmpty()) {

                chunkSize = 0;
                nextChunkStart = chunkStart;

            } else {

                selectionSize =
                    selection.getEndFrame() -
                    selection.getStartFrame();

                if (chunkStart < selection.getStartFrame()) {
                    chunkStart = selection.getStartFrame();
                    fadeIn = 50;
                }

                nextChunkStart = chunkStart + chunkSize;

                if (nextChunkStart >= selection.getEndFrame()) {
                    nextChunkStart = selection.getEndFrame();
                    fadeOut = 50;
                }

                chunkSize = nextChunkStart - chunkStart;
            }

        } else if (looping && m_lastModelEndFrame > 0) {

            if (chunkStart >= m_lastModelEndFrame) {
                chunkStart = 0;
            }
            if (chunkSize > m_lastModelEndFrame - chunkStart) {
                chunkSize = m_lastModelEndFrame - chunkStart;
            }
            nextChunkStart = chunkStart + chunkSize;
        }

        // std::cout << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << std::endl;

        if (!chunkSize) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "Ending selection playback at " << nextChunkStart << std::endl;
#endif
            // We need to maintain full buffers so that the other
            // thread can tell where it's got to in the playback -- so
            // return the full amount here
            frame = frame + count;
            return count;
        }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
        std::cout << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << std::endl;
#endif

        size_t got = 0;

        if (selectionSize < 100) {
            fadeIn = 0;
            fadeOut = 0;
        } else if (selectionSize < 300) {
            if (fadeIn > 0) fadeIn = 10;
            if (fadeOut > 0) fadeOut = 10;
        }
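        // (These fade lengths are frame counts, so the 50-frame fades
        // applied at selection boundaries come to only around a
        // millisecond at 44.1kHz.)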

        if (fadeIn > 0) {
            if (processed * 2 < fadeIn) {
                fadeIn = processed * 2;
            }
        }

        if (fadeOut > 0) {
            if ((count - processed - chunkSize) * 2 < fadeOut) {
                fadeOut = (count - processed - chunkSize) * 2;
            }
        }

        for (std::set<Model *>::iterator mi = m_models.begin();
             mi != m_models.end(); ++mi) {

            got = m_audioGenerator->mixModel(*mi, chunkStart,
                                             chunkSize, chunkBufferPtrs,
                                             fadeIn, fadeOut);
        }

        for (size_t c = 0; c < channels; ++c) {
            chunkBufferPtrs[c] += chunkSize;
        }

        processed += chunkSize;
        chunkStart = nextChunkStart;
    }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "Returning selection playback " << processed << " frames to " << nextChunkStart << std::endl;
#endif

    frame = nextChunkStart;
    return processed;
}

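// The fill thread writes into m_writeBuffers while the audio callback
// reads from m_readBuffers.  These are often the same RingBufferVector;
// they differ after clearRingBuffers() has allocated a fresh set of write
// buffers.  unifyRingBuffers() switches reading over to the new buffers
// once they hold enough data, and hands the old read buffers to
// m_bufferScavenger rather than deleting them immediately.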
void
AudioCallbackPlaySource::unifyRingBuffers()
{
    if (m_readBuffers == m_writeBuffers) return;

    // only unify if there will be something to read
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            if (wb->getReadSpace() < m_blockSize * 2) {
                if ((m_writeBufferFill + m_blockSize * 2) <
                    m_lastModelEndFrame) {
                    // OK, we don't have enough and there's more to
                    // read -- don't unify until we can do better
                    return;
                }
            }
            break;
        }
    }

    size_t rf = m_readBufferFill;
    RingBuffer<float> *rb = getReadRingBuffer(0);
    if (rb) {
        size_t rs = rb->getReadSpace();
        //!!! incorrect when in non-contiguous selection, see comments elsewhere
        // std::cout << "rs = " << rs << std::endl;
        if (rs < rf) rf -= rs;
        else rf = 0;
    }

    //std::cout << "m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << std::endl;

    size_t wf = m_writeBufferFill;
    size_t skip = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            if (c == 0) {

                size_t wrs = wb->getReadSpace();
                // std::cout << "wrs = " << wrs << std::endl;

                if (wrs < wf) wf -= wrs;
                else wf = 0;
                // std::cout << "wf = " << wf << std::endl;

                if (wf < rf) skip = rf - wf;
                if (skip == 0) break;
            }

            // std::cout << "skipping " << skip << std::endl;
            wb->skip(skip);
        }
    }

    m_bufferScavenger.claim(m_readBuffers);
    m_readBuffers = m_writeBuffers;
    m_readBufferFill = m_writeBufferFill;
    // std::cout << "unified" << std::endl;
}

void
AudioCallbackPlaySource::FillThread::run()
{
    AudioCallbackPlaySource &s(m_source);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySourceFillThread starting" << std::endl;
#endif

    s.m_mutex.lock();

    bool previouslyPlaying = s.m_playing;
    bool work = false;

    while (!s.m_exiting) {

        s.unifyRingBuffers();
        s.m_bufferScavenger.scavenge();
        s.m_pluginScavenger.scavenge();
        s.m_timeStretcherScavenger.scavenge();

        if (work && s.m_playing && s.getSourceSampleRate()) {

#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "AudioCallbackPlaySourceFillThread: not waiting" << std::endl;
#endif

            s.m_mutex.unlock();
            s.m_mutex.lock();

        } else {

            float ms = 100;
            if (s.getSourceSampleRate() > 0) {
                ms = float(m_ringBufferSize) / float(s.getSourceSampleRate()) * 1000.0;
            }

            if (s.m_playing) ms /= 10;
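            // (With the 131071-frame ring buffer at 44.1kHz this works
            // out at roughly 2970ms between wakeups when idle, and a
            // tenth of that -- about 297ms -- while playing.)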

#ifdef DEBUG_AUDIO_PLAY_SOURCE
            if (!s.m_playing) std::cout << std::endl;
            std::cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << std::endl;
#endif

            s.m_condition.wait(&s.m_mutex, size_t(ms));
        }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
        std::cout << "AudioCallbackPlaySourceFillThread: awoken" << std::endl;
#endif

        work = false;

        if (!s.getSourceSampleRate()) continue;

        bool playing = s.m_playing;

        if (playing && !previouslyPlaying) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << std::endl;
#endif
            for (size_t c = 0; c < s.getTargetChannelCount(); ++c) {
                RingBuffer<float> *rb = s.getReadRingBuffer(c);
                if (rb) rb->reset();
            }
        }
        previouslyPlaying = playing;

        work = s.fillBuffers();
    }

    s.m_mutex.unlock();
}