comparison: audioio/AudioCallbackPlaySource.cpp @ 0:cd5d7ff8ef38

* Reorganising code base. This revision will not compile.

author    Chris Cannam
date      Mon, 31 Jul 2006 12:03:45 +0000
parents   (none)
children  40116f709d3b
comparing -1:000000000000 with 0:cd5d7ff8ef38

/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    Sonic Visualiser
    An audio file viewer and annotation editor.
    Centre for Digital Music, Queen Mary, University of London.
    This file copyright 2006 Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/

#include "AudioCallbackPlaySource.h"

#include "AudioGenerator.h"

#include "base/Model.h"
#include "base/ViewManager.h"
#include "base/PlayParameterRepository.h"
#include "model/DenseTimeValueModel.h"
#include "model/SparseOneDimensionalModel.h"
#include "IntegerTimeStretcher.h"

#include <iostream>
#include <cassert>

//#define DEBUG_AUDIO_PLAY_SOURCE 1
//#define DEBUG_AUDIO_PLAY_SOURCE_PLAYING 1

//const size_t AudioCallbackPlaySource::m_ringBufferSize = 102400;
const size_t AudioCallbackPlaySource::m_ringBufferSize = 131071;
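
// Note on sizing: the ring buffers hold m_ringBufferSize frames per
// channel, so at a typical 44100 Hz source rate (an assumption, not a
// requirement) 131071 frames is roughly 131071 / 44100 ≈ 2.97 seconds
// of audio buffered ahead of the output callback for each channel.
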
AudioCallbackPlaySource::AudioCallbackPlaySource(ViewManager *manager) :
    m_viewManager(manager),
    m_audioGenerator(new AudioGenerator()),
    m_readBuffers(0),
    m_writeBuffers(0),
    m_readBufferFill(0),
    m_writeBufferFill(0),
    m_bufferScavenger(1),
    m_sourceChannelCount(0),
    m_blockSize(1024),
    m_sourceSampleRate(0),
    m_targetSampleRate(0),
    m_playLatency(0),
    m_playing(false),
    m_exiting(false),
    m_lastModelEndFrame(0),
    m_outputLeft(0.0),
    m_outputRight(0.0),
    m_slowdownCounter(0),
    m_timeStretcher(0),
    m_fillThread(0),
    m_converter(0)
{
    m_viewManager->setAudioPlaySource(this);

    connect(m_viewManager, SIGNAL(selectionChanged()),
            this, SLOT(selectionChanged()));
    connect(m_viewManager, SIGNAL(playLoopModeChanged()),
            this, SLOT(playLoopModeChanged()));
    connect(m_viewManager, SIGNAL(playSelectionModeChanged()),
            this, SLOT(playSelectionModeChanged()));

    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playParametersChanged(PlayParameters *)),
            this, SLOT(playParametersChanged(PlayParameters *)));
}

AudioCallbackPlaySource::~AudioCallbackPlaySource()
{
    m_exiting = true;

    if (m_fillThread) {
        m_condition.wakeAll();
        m_fillThread->wait();
        delete m_fillThread;
    }

    clearModels();

    if (m_readBuffers != m_writeBuffers) {
        delete m_readBuffers;
    }

    delete m_writeBuffers;

    delete m_audioGenerator;

    m_bufferScavenger.scavenge(true);
}
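
// The class keeps two sets of ring buffers: m_writeBuffers, filled by
// the background fill thread, and m_readBuffers, drained by the audio
// callback.  Much of the time the two pointers refer to the same
// vector (they are "unified"); they only diverge temporarily after the
// buffers are rebuilt, which is why the destructor and
// clearRingBuffers() check for aliasing before deleting.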

void
AudioCallbackPlaySource::addModel(Model *model)
{
    if (m_models.find(model) != m_models.end()) return;

    bool canPlay = m_audioGenerator->addModel(model);

    m_mutex.lock();

    m_models.insert(model);
    if (model->getEndFrame() > m_lastModelEndFrame) {
        m_lastModelEndFrame = model->getEndFrame();
    }

    bool buffersChanged = false, srChanged = false;

    size_t modelChannels = 1;
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) modelChannels = dtvm->getChannelCount();
    if (modelChannels > m_sourceChannelCount) {
        m_sourceChannelCount = modelChannels;
    }

//    std::cerr << "Adding model with " << modelChannels << " channels " << std::endl;

    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();
        srChanged = true;

    } else if (model->getSampleRate() != m_sourceSampleRate) {

        // If this is a dense time-value model and we have no other, we
        // can just switch to this model's sample rate

        if (dtvm) {

            bool conflicting = false;

            for (std::set<Model *>::const_iterator i = m_models.begin();
                 i != m_models.end(); ++i) {
                if (*i != dtvm && dynamic_cast<DenseTimeValueModel *>(*i)) {
                    std::cerr << "AudioCallbackPlaySource::addModel: Conflicting dense time-value model " << *i << " found" << std::endl;
                    conflicting = true;
                    break;
                }
            }

            if (conflicting) {

                std::cerr << "AudioCallbackPlaySource::addModel: ERROR: "
                          << "New model sample rate does not match" << std::endl
                          << "existing model(s) (new " << model->getSampleRate()
                          << " vs " << m_sourceSampleRate
                          << "), playback will be wrong"
                          << std::endl;

                emit sampleRateMismatch(model->getSampleRate(), m_sourceSampleRate,
                                        false);
            } else {
                m_sourceSampleRate = model->getSampleRate();
                srChanged = true;
            }
        }
    }

    if (!m_writeBuffers || (m_writeBuffers->size() < getTargetChannelCount())) {
        clearRingBuffers(true, getTargetChannelCount());
        buffersChanged = true;
    } else {
        if (canPlay) clearRingBuffers(true);
    }

    if (buffersChanged || srChanged) {
        if (m_converter) {
            src_delete(m_converter);
            m_converter = 0;
        }
    }

    m_mutex.unlock();

    m_audioGenerator->setTargetChannelCount(getTargetChannelCount());

    if (!m_fillThread) {
        m_fillThread = new AudioCallbackPlaySourceFillThread(*this);
        m_fillThread->start();
    }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "AudioCallbackPlaySource::addModel: emitting modelReplaced" << std::endl;
#endif

    if (buffersChanged || srChanged) {
        emit modelReplaced();
    }

    m_condition.wakeAll();
}
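
// Sample-rate policy in addModel(), in brief: the first model to
// arrive fixes m_sourceSampleRate.  If a dense (audio) model later
// arrives at a different rate, it simply takes over the source rate
// when it is the only dense model present; if another dense model
// already exists, sampleRateMismatch() is emitted and playback will
// run at the wrong speed, as the error message says.  A sparse model
// at a differing rate leaves the source rate untouched.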

void
AudioCallbackPlaySource::removeModel(Model *model)
{
    m_mutex.lock();

    m_models.erase(model);

    if (m_models.empty()) {
        if (m_converter) {
            src_delete(m_converter);
            m_converter = 0;
        }
        m_sourceSampleRate = 0;
    }

    size_t lastEnd = 0;
    for (std::set<Model *>::const_iterator i = m_models.begin();
         i != m_models.end(); ++i) {
//        std::cerr << "AudioCallbackPlaySource::removeModel(" << model << "): checking end frame on model " << *i << std::endl;
        if ((*i)->getEndFrame() > lastEnd) lastEnd = (*i)->getEndFrame();
//        std::cerr << "(done, lastEnd now " << lastEnd << ")" << std::endl;
    }
    m_lastModelEndFrame = lastEnd;

    m_mutex.unlock();

    m_audioGenerator->removeModel(model);

    clearRingBuffers();
}

void
AudioCallbackPlaySource::clearModels()
{
    m_mutex.lock();

    m_models.clear();

    if (m_converter) {
        src_delete(m_converter);
        m_converter = 0;
    }

    m_lastModelEndFrame = 0;

    m_sourceSampleRate = 0;

    m_mutex.unlock();

    m_audioGenerator->clearModels();
}

void
AudioCallbackPlaySource::clearRingBuffers(bool haveLock, size_t count)
{
    if (!haveLock) m_mutex.lock();

    if (count == 0) {
        if (m_writeBuffers) count = m_writeBuffers->size();
    }

    size_t sf = m_readBufferFill;
    RingBuffer<float> *rb = getReadRingBuffer(0);
    if (rb) {
        //!!! This is incorrect if we're in a non-contiguous selection
        //Same goes for all related code (subtracting the read space
        //from the fill frame to try to establish where the effective
        //pre-resample/timestretch read pointer is)
        size_t rs = rb->getReadSpace();
        if (rs < sf) sf -= rs;
        else sf = 0;
    }
    m_writeBufferFill = sf;

    if (m_readBuffers != m_writeBuffers) {
        delete m_writeBuffers;
    }

    m_writeBuffers = new RingBufferVector;

    for (size_t i = 0; i < count; ++i) {
        m_writeBuffers->push_back(new RingBuffer<float>(m_ringBufferSize));
    }

//    std::cerr << "AudioCallbackPlaySource::clearRingBuffers: Created "
//              << count << " write buffers" << std::endl;

    if (!haveLock) {
        m_mutex.unlock();
    }
}
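
// Rough arithmetic for the m_writeBufferFill adjustment above, with
// illustrative numbers: if the fill thread has buffered up to frame
// 100000 (m_readBufferFill) and the read ring buffer still holds
// 30000 unread frames, then the audio callback has only consumed up
// to roughly frame 70000, so that is where the fresh write buffers
// should resume filling from.  As the //!!! note says, this is only
// right for contiguous playback, not for a non-contiguous selection.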

void
AudioCallbackPlaySource::play(size_t startFrame)
{
    if (m_viewManager->getPlaySelectionMode() &&
        !m_viewManager->getSelections().empty()) {
        MultiSelection::SelectionList selections = m_viewManager->getSelections();
        MultiSelection::SelectionList::iterator i = selections.begin();
        if (i != selections.end()) {
            if (startFrame < i->getStartFrame()) {
                startFrame = i->getStartFrame();
            } else {
                MultiSelection::SelectionList::iterator j = selections.end();
                --j;
                if (startFrame >= j->getEndFrame()) {
                    startFrame = i->getStartFrame();
                }
            }
        }
    } else {
        if (startFrame >= m_lastModelEndFrame) {
            startFrame = 0;
        }
    }

    // The fill thread will automatically empty its buffers before
    // starting again if we have not so far been playing, but not if
    // we're just re-seeking.

    m_mutex.lock();
    if (m_playing) {
        m_readBufferFill = m_writeBufferFill = startFrame;
        if (m_readBuffers) {
            for (size_t c = 0; c < getTargetChannelCount(); ++c) {
                RingBuffer<float> *rb = getReadRingBuffer(c);
                if (rb) rb->reset();
            }
        }
        if (m_converter) src_reset(m_converter);
    } else {
        if (m_converter) src_reset(m_converter);
        m_readBufferFill = m_writeBufferFill = startFrame;
    }
    m_mutex.unlock();

    m_audioGenerator->reset();

    bool changed = !m_playing;
    m_playing = true;
    m_condition.wakeAll();
    if (changed) emit playStatusChanged(m_playing);
}
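
// Start-frame clamping above, illustrated with made-up numbers: with
// play-selection mode on and selections covering frames 1000-2000 and
// 5000-6000, a request to start at frame 300 is moved up to 1000, and
// a request at or beyond 6000 wraps back to 1000; anything in between
// is left where it is.  Without selections, a start at or past the
// last model end frame wraps to 0.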

void
AudioCallbackPlaySource::stop()
{
    bool changed = m_playing;
    m_playing = false;
    m_condition.wakeAll();
    if (changed) emit playStatusChanged(m_playing);
}

void
AudioCallbackPlaySource::selectionChanged()
{
    if (m_viewManager->getPlaySelectionMode()) {
        clearRingBuffers();
    }
}

void
AudioCallbackPlaySource::playLoopModeChanged()
{
    clearRingBuffers();
}

void
AudioCallbackPlaySource::playSelectionModeChanged()
{
    if (!m_viewManager->getSelections().empty()) {
        clearRingBuffers();
    }
}

void
AudioCallbackPlaySource::playParametersChanged(PlayParameters *params)
{
    clearRingBuffers();
}

void
AudioCallbackPlaySource::setTargetBlockSize(size_t size)
{
//    std::cerr << "AudioCallbackPlaySource::setTargetBlockSize() -> " << size << std::endl;
    assert(size < m_ringBufferSize);
    m_blockSize = size;
}

size_t
AudioCallbackPlaySource::getTargetBlockSize() const
{
//    std::cerr << "AudioCallbackPlaySource::getTargetBlockSize() -> " << m_blockSize << std::endl;
    return m_blockSize;
}

void
AudioCallbackPlaySource::setTargetPlayLatency(size_t latency)
{
    m_playLatency = latency;
}

size_t
AudioCallbackPlaySource::getTargetPlayLatency() const
{
    return m_playLatency;
}

size_t
AudioCallbackPlaySource::getCurrentPlayingFrame()
{
    bool resample = false;
    double ratio = 1.0;

    if (getSourceSampleRate() != getTargetSampleRate()) {
        resample = true;
        ratio = double(getSourceSampleRate()) / double(getTargetSampleRate());
    }

    size_t readSpace = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *rb = getReadRingBuffer(c);
        if (rb) {
            size_t spaceHere = rb->getReadSpace();
            if (c == 0 || spaceHere < readSpace) readSpace = spaceHere;
        }
    }

    if (resample) {
        readSpace = size_t(readSpace * ratio + 0.1);
    }

    size_t latency = m_playLatency;
    if (resample) latency = size_t(m_playLatency * ratio + 0.1);

    TimeStretcherData *timeStretcher = m_timeStretcher;
    if (timeStretcher) {
        latency += timeStretcher->getStretcher(0)->getProcessingLatency();
    }

    latency += readSpace;
    size_t bufferedFrame = m_readBufferFill;

    bool looping = m_viewManager->getPlayLoopMode();
    bool constrained = (m_viewManager->getPlaySelectionMode() &&
                        !m_viewManager->getSelections().empty());

    size_t framePlaying = bufferedFrame;

    if (looping && !constrained) {
        while (framePlaying < latency) framePlaying += m_lastModelEndFrame;
    }

    if (framePlaying > latency) framePlaying -= latency;
    else framePlaying = 0;

    if (!constrained) {
        if (!looping && framePlaying > m_lastModelEndFrame) {
            framePlaying = m_lastModelEndFrame;
            stop();
        }
        return framePlaying;
    }

    MultiSelection::SelectionList selections = m_viewManager->getSelections();
    MultiSelection::SelectionList::const_iterator i;

    i = selections.begin();
    size_t rangeStart = i->getStartFrame();

    i = selections.end();
    --i;
    size_t rangeEnd = i->getEndFrame();

    for (i = selections.begin(); i != selections.end(); ++i) {
        if (i->contains(bufferedFrame)) break;
    }

    size_t f = bufferedFrame;

//    std::cerr << "getCurrentPlayingFrame: f=" << f << ", latency=" << latency << ", rangeEnd=" << rangeEnd << std::endl;

    if (i == selections.end()) {
        --i;
        if (i->getEndFrame() + latency < f) {
//            std::cerr << "framePlaying = " << framePlaying << ", rangeEnd = " << rangeEnd << std::endl;

            if (!looping && (framePlaying > rangeEnd)) {
//                std::cerr << "STOPPING" << std::endl;
                stop();
                return rangeEnd;
            } else {
                return framePlaying;
            }
        } else {
//            std::cerr << "latency <- " << latency << "-(" << f << "-" << i->getEndFrame() << ")" << std::endl;
            latency -= (f - i->getEndFrame());
            f = i->getEndFrame();
        }
    }

//    std::cerr << "i=(" << i->getStartFrame() << "," << i->getEndFrame() << ") f=" << f << ", latency=" << latency << std::endl;

    while (latency > 0) {
        size_t offset = f - i->getStartFrame();
        if (offset >= latency) {
            if (f > latency) {
                framePlaying = f - latency;
            } else {
                framePlaying = 0;
            }
            break;
        } else {
            if (i == selections.begin()) {
                if (looping) {
                    i = selections.end();
                }
            }
            latency -= offset;
            --i;
            f = i->getEndFrame();
        }
    }

    return framePlaying;
}
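
// What the latency bookkeeping above amounts to, with illustrative
// numbers and no resampling in play: suppose the driver latency is
// 2048 frames, the time stretcher reports 0, and 8192 frames are
// still sitting unread in the ring buffers.  Then "latency" is 10240
// frames, and the frame actually being heard is taken to be
// m_readBufferFill minus 10240, counted backwards through the
// selection regions when playback is constrained to a non-contiguous
// selection.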

void
AudioCallbackPlaySource::setOutputLevels(float left, float right)
{
    m_outputLeft = left;
    m_outputRight = right;
}

bool
AudioCallbackPlaySource::getOutputLevels(float &left, float &right)
{
    left = m_outputLeft;
    right = m_outputRight;
    return true;
}

void
AudioCallbackPlaySource::setTargetSampleRate(size_t sr)
{
    m_targetSampleRate = sr;

    if (getSourceSampleRate() != getTargetSampleRate()) {

        int err = 0;
        m_converter = src_new(SRC_SINC_BEST_QUALITY,
                              getTargetChannelCount(), &err);
        if (!m_converter) {
            std::cerr
                << "AudioCallbackPlaySource::setTargetSampleRate: ERROR in creating samplerate converter: "
                << src_strerror(err) << std::endl;

            emit sampleRateMismatch(getSourceSampleRate(),
                                    getTargetSampleRate(),
                                    false);
        } else {

            emit sampleRateMismatch(getSourceSampleRate(),
                                    getTargetSampleRate(),
                                    true);
        }
    }
}
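
// The converter above comes from libsamplerate: src_new() takes a
// converter type, a channel count and an error out-parameter, and
// SRC_SINC_BEST_QUALITY selects the slowest, highest-quality sinc
// resampler the library offers.  A cheaper type such as
// SRC_SINC_FASTEST (or SRC_LINEAR) could be substituted here if CPU
// load ever became a problem; that trade-off is not something this
// revision explores.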

size_t
AudioCallbackPlaySource::getTargetSampleRate() const
{
    if (m_targetSampleRate) return m_targetSampleRate;
    else return getSourceSampleRate();
}

size_t
AudioCallbackPlaySource::getSourceChannelCount() const
{
    return m_sourceChannelCount;
}

size_t
AudioCallbackPlaySource::getTargetChannelCount() const
{
    if (m_sourceChannelCount < 2) return 2;
    return m_sourceChannelCount;
}

size_t
AudioCallbackPlaySource::getSourceSampleRate() const
{
    return m_sourceSampleRate;
}
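
// "Source" here means the models being played and "target" means the
// audio device: a mono model is still played through at least two
// output channels, and a 44100 Hz model driven by a 48000 Hz device
// (illustrative figures) is resampled by the converter in
// fillBuffers() at a ratio of 48000/44100 ≈ 1.088.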

AudioCallbackPlaySource::TimeStretcherData::TimeStretcherData(size_t channels,
                                                              size_t factor,
                                                              size_t blockSize) :
    m_factor(factor),
    m_blockSize(blockSize)
{
//    std::cerr << "TimeStretcherData::TimeStretcherData(" << channels << ", " << factor << ", " << blockSize << ")" << std::endl;

    for (size_t ch = 0; ch < channels; ++ch) {
        m_stretcher[ch] = StretcherBuffer
            //!!! We really need to measure performance and work out
            //what sort of quality level to use -- or at least to
            //allow the user to configure it
            (new IntegerTimeStretcher(factor, blockSize, 128),
             new float[blockSize * factor]);
    }
    m_stretchInputBuffer = new float[blockSize];
}

AudioCallbackPlaySource::TimeStretcherData::~TimeStretcherData()
{
//    std::cerr << "TimeStretcherData::~TimeStretcherData" << std::endl;

    while (!m_stretcher.empty()) {
        delete m_stretcher.begin()->second.first;
        delete[] m_stretcher.begin()->second.second;
        m_stretcher.erase(m_stretcher.begin());
    }
    delete[] m_stretchInputBuffer; // allocated with new[] above, so array delete
}

IntegerTimeStretcher *
AudioCallbackPlaySource::TimeStretcherData::getStretcher(size_t channel)
{
    return m_stretcher[channel].first;
}

float *
AudioCallbackPlaySource::TimeStretcherData::getOutputBuffer(size_t channel)
{
    return m_stretcher[channel].second;
}

float *
AudioCallbackPlaySource::TimeStretcherData::getInputBuffer()
{
    return m_stretchInputBuffer;
}

void
AudioCallbackPlaySource::TimeStretcherData::run(size_t channel)
{
    getStretcher(channel)->process(getInputBuffer(),
                                   getOutputBuffer(channel),
                                   m_blockSize);
}
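
// TimeStretcherData keeps, per channel, an IntegerTimeStretcher plus
// an output buffer of blockSize * factor frames, and shares a single
// blockSize-frame input buffer across channels.  run(channel) pushes
// the shared input block through that channel's stretcher, so a
// factor-2 stretcher turns each 1024-frame input block (assuming the
// default block size) into 2048 output frames for getSourceSamples()
// to dole out.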

void
AudioCallbackPlaySource::setSlowdownFactor(size_t factor)
{
    // Avoid locks -- create, assign, mark old one for scavenging
    // later (as a call to getSourceSamples may still be using it)

    TimeStretcherData *existingStretcher = m_timeStretcher;

    if (existingStretcher && existingStretcher->getFactor() == factor) {
        return;
    }

    if (factor > 1) {
        TimeStretcherData *newStretcher = new TimeStretcherData
            (getTargetChannelCount(), factor, getTargetBlockSize());
        m_slowdownCounter = 0;
        m_timeStretcher = newStretcher;
    } else {
        m_timeStretcher = 0;
    }

    if (existingStretcher) {
        m_timeStretcherScavenger.claim(existingStretcher);
    }
}
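
// The swap above is the same lock-free replace-and-scavenge pattern
// used elsewhere in this class: the audio thread only ever reads
// m_timeStretcher once into a local pointer, so the old object is not
// deleted out from under it; instead it is handed to
// m_timeStretcherScavenger, and the fill thread disposes of it later
// once no caller can still be holding it.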

size_t
AudioCallbackPlaySource::getSourceSamples(size_t count, float **buffer)
{
    if (!m_playing) {
        for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
            for (size_t i = 0; i < count; ++i) {
                buffer[ch][i] = 0.0;
            }
        }
        return 0;
    }

    TimeStretcherData *timeStretcher = m_timeStretcher;

    if (!timeStretcher || timeStretcher->getFactor() == 1) {

        size_t got = 0;

        for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {

            RingBuffer<float> *rb = getReadRingBuffer(ch);

            if (rb) {

                // this is marginally more likely to leave our channels in
                // sync after a processing failure than just passing "count":
                size_t request = count;
                if (ch > 0) request = got;

                got = rb->read(buffer[ch], request);

#ifdef DEBUG_AUDIO_PLAY_SOURCE_PLAYING
                std::cout << "AudioCallbackPlaySource::getSamples: got " << got << " samples on channel " << ch << ", signalling for more (possibly)" << std::endl;
#endif
            }

            for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {
                for (size_t i = got; i < count; ++i) {
                    buffer[ch][i] = 0.0;
                }
            }
        }

        m_condition.wakeAll();
        return got;
    }

    if (m_slowdownCounter == 0) {

        size_t got = 0;
        float *ib = timeStretcher->getInputBuffer();

        for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {

            RingBuffer<float> *rb = getReadRingBuffer(ch);

            if (rb) {

                size_t request = count;
                if (ch > 0) request = got; // see above

                got = rb->read(buffer[ch], request);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
                std::cout << "AudioCallbackPlaySource::getSamples: got " << got << " samples on channel " << ch << ", running time stretcher" << std::endl;
#endif

                for (size_t i = 0; i < count; ++i) {
                    ib[i] = buffer[ch][i];
                }

                timeStretcher->run(ch);
            }
        }

    } else if (m_slowdownCounter >= timeStretcher->getFactor()) {
        // reset this in case the factor has changed leaving the
        // counter out of range
        m_slowdownCounter = 0;
    }

    for (size_t ch = 0; ch < getTargetChannelCount(); ++ch) {

        float *ob = timeStretcher->getOutputBuffer(ch);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
        std::cerr << "AudioCallbackPlaySource::getSamples: Copying from (" << (m_slowdownCounter * count) << "," << count << ") to buffer" << std::endl;
#endif

        for (size_t i = 0; i < count; ++i) {
            buffer[ch][i] = ob[m_slowdownCounter * count + i];
        }
    }

    //!!! if (m_slowdownCounter == 0) m_condition.wakeAll();
    m_slowdownCounter = (m_slowdownCounter + 1) % timeStretcher->getFactor();
    return count;
}
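
// How the slowdown counter plays out, using a factor of 2 and the
// callback's own block size for "count" (the usual case, since the
// stretcher was built with getTargetBlockSize()): on the call where
// m_slowdownCounter is 0, a fresh block is read from the ring buffers
// and stretched into 2 * count output frames; that call hands back
// frames [0, count) of the stretched output, the next call hands back
// frames [count, 2 * count) without touching the ring buffers, and
// the counter then wraps back to 0.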

// Called from fill thread, m_playing true, mutex held
bool
AudioCallbackPlaySource::fillBuffers()
{
    static float *tmp = 0;
    static size_t tmpSize = 0;

    size_t space = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            size_t spaceHere = wb->getWriteSpace();
            if (c == 0 || spaceHere < space) space = spaceHere;
        }
    }

    if (space == 0) return false;

    size_t f = m_writeBufferFill;

    bool readWriteEqual = (m_readBuffers == m_writeBuffers);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "AudioCallbackPlaySourceFillThread: filling " << space << " frames" << std::endl;
#endif

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << "buffered to " << f << " already" << std::endl;
#endif

    bool resample = (getSourceSampleRate() != getTargetSampleRate());

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cout << (resample ? "" : "not ") << "resampling (source " << getSourceSampleRate() << ", target " << getTargetSampleRate() << ")" << std::endl;
#endif

    size_t channels = getTargetChannelCount();

    size_t orig = space;
    size_t got = 0;

    static float **bufferPtrs = 0;
    static size_t bufferPtrCount = 0;

    if (bufferPtrCount < channels) {
        if (bufferPtrs) delete[] bufferPtrs;
        bufferPtrs = new float *[channels];
        bufferPtrCount = channels;
    }

    size_t generatorBlockSize = m_audioGenerator->getBlockSize();

    if (resample && !m_converter) {
        static bool warned = false;
        if (!warned) {
            std::cerr << "WARNING: sample rates differ, but no converter available!" << std::endl;
            warned = true;
        }
    }

    if (resample && m_converter) {

        double ratio =
            double(getTargetSampleRate()) / double(getSourceSampleRate());
        orig = size_t(orig / ratio + 0.1);

        // orig must be a multiple of generatorBlockSize
        orig = (orig / generatorBlockSize) * generatorBlockSize;
        if (orig == 0) return false;

        size_t work = std::max(orig, space);

        // We only allocate one buffer, but we use it in two halves.
        // We place the non-interleaved values in the second half of
        // the buffer (orig samples for channel 0, orig samples for
        // channel 1 etc), and then interleave them into the first
        // half of the buffer.  Then we resample back into the second
        // half (interleaved) and de-interleave the results back to
        // the start of the buffer for insertion into the ringbuffers.
        // What a faff -- especially as we've already de-interleaved
        // the audio data from the source file elsewhere before we
        // even reach this point.

        if (tmpSize < channels * work * 2) {
            delete[] tmp;
            tmp = new float[channels * work * 2];
            tmpSize = channels * work * 2;
        }

        float *nonintlv = tmp + channels * work;
        float *intlv = tmp;
        float *srcout = tmp + channels * work;

        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < orig; ++i) {
                nonintlv[channels * i + c] = 0.0f;
            }
        }

        for (size_t c = 0; c < channels; ++c) {
            bufferPtrs[c] = nonintlv + c * orig;
        }

        got = mixModels(f, orig, bufferPtrs);

        // and interleave into first half
        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < got; ++i) {
                float sample = nonintlv[c * got + i];
                intlv[channels * i + c] = sample;
            }
        }

        SRC_DATA data;
        data.data_in = intlv;
        data.data_out = srcout;
        data.input_frames = got;
        data.output_frames = work;
        data.src_ratio = ratio;
        data.end_of_input = 0;

        int err = src_process(m_converter, &data);
//        size_t toCopy = size_t(work * ratio + 0.1);
        size_t toCopy = size_t(got * ratio + 0.1);

        if (err) {
            std::cerr
                << "AudioCallbackPlaySourceFillThread: ERROR in samplerate conversion: "
                << src_strerror(err) << std::endl;
            //!!! Then what?
        } else {
            got = data.input_frames_used;
            toCopy = data.output_frames_gen;
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cerr << "Resampled " << got << " frames to " << toCopy << " frames" << std::endl;
#endif
        }

        for (size_t c = 0; c < channels; ++c) {
            for (size_t i = 0; i < toCopy; ++i) {
                tmp[i] = srcout[channels * i + c];
            }
            RingBuffer<float> *wb = getWriteRingBuffer(c);
            if (wb) wb->write(tmp, toCopy);
        }

        m_writeBufferFill = f;
        if (readWriteEqual) m_readBufferFill = f;

    } else {

        // space must be a multiple of generatorBlockSize
        space = (space / generatorBlockSize) * generatorBlockSize;
        if (space == 0) return false;

        if (tmpSize < channels * space) {
            delete[] tmp;
            tmp = new float[channels * space];
            tmpSize = channels * space;
        }

        for (size_t c = 0; c < channels; ++c) {

            bufferPtrs[c] = tmp + c * space;

            for (size_t i = 0; i < space; ++i) {
                tmp[c * space + i] = 0.0f;
            }
        }

        size_t got = mixModels(f, space, bufferPtrs);

        for (size_t c = 0; c < channels; ++c) {

            RingBuffer<float> *wb = getWriteRingBuffer(c);
            if (wb) wb->write(bufferPtrs[c], got);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
            if (wb)
                std::cerr << "Wrote " << got << " frames for ch " << c << ", now "
                          << wb->getReadSpace() << " to read"
                          << std::endl;
#endif
        }

        m_writeBufferFill = f;
        if (readWriteEqual) m_readBufferFill = f;

        //!!! how do we know when ended? need to mark up a fully-buffered flag and check this if we find the buffers empty in getSourceSamples
    }

    return true;
}
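
// A concrete picture of the scratch-buffer layout used when
// resampling above, with illustrative sizes -- stereo, orig = 4096
// source frames, work = max(orig, space): tmp holds channels * work * 2
// floats; the second half first receives the mixed, non-interleaved
// model audio (4096 frames of channel 0, then 4096 of channel 1),
// which is interleaved L R L R ... into the first half, resampled by
// src_process() back into the second half, and finally de-interleaved
// one channel at a time into the start of tmp on its way into each
// write ring buffer.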

size_t
AudioCallbackPlaySource::mixModels(size_t &frame, size_t count, float **buffers)
{
    size_t processed = 0;
    size_t chunkStart = frame;
    size_t chunkSize = count;
    size_t selectionSize = 0;
    size_t nextChunkStart = chunkStart + chunkSize;

    bool looping = m_viewManager->getPlayLoopMode();
    bool constrained = (m_viewManager->getPlaySelectionMode() &&
                        !m_viewManager->getSelections().empty());

    static float **chunkBufferPtrs = 0;
    static size_t chunkBufferPtrCount = 0;
    size_t channels = getTargetChannelCount();

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "Selection playback: start " << frame << ", size " << count <<", channels " << channels << std::endl;
#endif

    if (chunkBufferPtrCount < channels) {
        if (chunkBufferPtrs) delete[] chunkBufferPtrs;
        chunkBufferPtrs = new float *[channels];
        chunkBufferPtrCount = channels;
    }

    for (size_t c = 0; c < channels; ++c) {
        chunkBufferPtrs[c] = buffers[c];
    }

    while (processed < count) {

        chunkSize = count - processed;
        nextChunkStart = chunkStart + chunkSize;
        selectionSize = 0;

        size_t fadeIn = 0, fadeOut = 0;

        if (constrained) {

            Selection selection =
                m_viewManager->getContainingSelection(chunkStart, true);

            if (selection.isEmpty()) {
                if (looping) {
                    selection = *m_viewManager->getSelections().begin();
                    chunkStart = selection.getStartFrame();
                    fadeIn = 50;
                }
            }

            if (selection.isEmpty()) {

                chunkSize = 0;
                nextChunkStart = chunkStart;

            } else {

                selectionSize =
                    selection.getEndFrame() -
                    selection.getStartFrame();

                if (chunkStart < selection.getStartFrame()) {
                    chunkStart = selection.getStartFrame();
                    fadeIn = 50;
                }

                nextChunkStart = chunkStart + chunkSize;

                if (nextChunkStart >= selection.getEndFrame()) {
                    nextChunkStart = selection.getEndFrame();
                    fadeOut = 50;
                }

                chunkSize = nextChunkStart - chunkStart;
            }

        } else if (looping && m_lastModelEndFrame > 0) {

            if (chunkStart >= m_lastModelEndFrame) {
                chunkStart = 0;
            }
            if (chunkSize > m_lastModelEndFrame - chunkStart) {
                chunkSize = m_lastModelEndFrame - chunkStart;
            }
            nextChunkStart = chunkStart + chunkSize;
        }

//        std::cerr << "chunkStart " << chunkStart << ", chunkSize " << chunkSize << ", nextChunkStart " << nextChunkStart << ", frame " << frame << ", count " << count << ", processed " << processed << std::endl;

        if (!chunkSize) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cerr << "Ending selection playback at " << nextChunkStart << std::endl;
#endif
            // We need to maintain full buffers so that the other
            // thread can tell where it's got to in the playback -- so
            // return the full amount here
            frame = frame + count;
            return count;
        }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
        std::cerr << "Selection playback: chunk at " << chunkStart << " -> " << nextChunkStart << " (size " << chunkSize << ")" << std::endl;
#endif

        size_t got = 0;

        if (selectionSize < 100) {
            fadeIn = 0;
            fadeOut = 0;
        } else if (selectionSize < 300) {
            if (fadeIn > 0) fadeIn = 10;
            if (fadeOut > 0) fadeOut = 10;
        }

        if (fadeIn > 0) {
            if (processed * 2 < fadeIn) {
                fadeIn = processed * 2;
            }
        }

        if (fadeOut > 0) {
            if ((count - processed - chunkSize) * 2 < fadeOut) {
                fadeOut = (count - processed - chunkSize) * 2;
            }
        }

        for (std::set<Model *>::iterator mi = m_models.begin();
             mi != m_models.end(); ++mi) {

            got = m_audioGenerator->mixModel(*mi, chunkStart,
                                             chunkSize, chunkBufferPtrs,
                                             fadeIn, fadeOut);
        }

        for (size_t c = 0; c < channels; ++c) {
            chunkBufferPtrs[c] += chunkSize;
        }

        processed += chunkSize;
        chunkStart = nextChunkStart;
    }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "Returning selection playback " << processed << " frames to " << nextChunkStart << std::endl;
#endif

    frame = nextChunkStart;
    return processed;
}
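
// Sketch of how a constrained mix proceeds, with made-up numbers: a
// request for 2048 frames starting at frame 1500 inside a selection
// spanning 1000-2000 produces a 500-frame chunk that fades out over
// the last ~50 frames at the selection edge, then continues in
// further chunks from whichever selection getContainingSelection()
// (or, when looping, the first selection) supplies next, fading in at
// each new region.  The 50-frame fades shrink to 10 for selections
// shorter than 300 frames and are dropped entirely below 100 frames.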

void
AudioCallbackPlaySource::unifyRingBuffers()
{
    if (m_readBuffers == m_writeBuffers) return;

    // only unify if there will be something to read
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            if (wb->getReadSpace() < m_blockSize * 2) {
                if ((m_writeBufferFill + m_blockSize * 2) <
                    m_lastModelEndFrame) {
                    // OK, we don't have enough and there's more to
                    // read -- don't unify until we can do better
                    return;
                }
            }
            break;
        }
    }

    size_t rf = m_readBufferFill;
    RingBuffer<float> *rb = getReadRingBuffer(0);
    if (rb) {
        size_t rs = rb->getReadSpace();
        //!!! incorrect when in non-contiguous selection, see comments elsewhere
//        std::cerr << "rs = " << rs << std::endl;
        if (rs < rf) rf -= rs;
        else rf = 0;
    }

//    std::cerr << "m_readBufferFill = " << m_readBufferFill << ", rf = " << rf << ", m_writeBufferFill = " << m_writeBufferFill << std::endl;

    size_t wf = m_writeBufferFill;
    size_t skip = 0;
    for (size_t c = 0; c < getTargetChannelCount(); ++c) {
        RingBuffer<float> *wb = getWriteRingBuffer(c);
        if (wb) {
            if (c == 0) {

                size_t wrs = wb->getReadSpace();
//                std::cerr << "wrs = " << wrs << std::endl;

                if (wrs < wf) wf -= wrs;
                else wf = 0;
//                std::cerr << "wf = " << wf << std::endl;

                if (wf < rf) skip = rf - wf;
                if (skip == 0) break;
            }

//            std::cerr << "skipping " << skip << std::endl;
            wb->skip(skip);
        }
    }

    m_bufferScavenger.claim(m_readBuffers);
    m_readBuffers = m_writeBuffers;
    m_readBufferFill = m_writeBufferFill;
//    std::cerr << "unified" << std::endl;
}
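
// The skip arithmetic above in round numbers: if the old read buffers
// have effectively played up to frame 70000 (rf) while the new write
// buffers were filled starting from an effective frame of 65000 (wf),
// then the first 5000 frames in each write buffer are already in the
// past, so they are skipped before the write buffers are promoted to
// read buffers and the old set is handed to the scavenger.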

void
AudioCallbackPlaySource::AudioCallbackPlaySourceFillThread::run()
{
    AudioCallbackPlaySource &s(m_source);

#ifdef DEBUG_AUDIO_PLAY_SOURCE
    std::cerr << "AudioCallbackPlaySourceFillThread starting" << std::endl;
#endif

    s.m_mutex.lock();

    bool previouslyPlaying = s.m_playing;
    bool work = false;

    while (!s.m_exiting) {

        s.unifyRingBuffers();
        s.m_bufferScavenger.scavenge();
        s.m_timeStretcherScavenger.scavenge();

        if (work && s.m_playing && s.getSourceSampleRate()) {

#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "AudioCallbackPlaySourceFillThread: not waiting" << std::endl;
#endif

            s.m_mutex.unlock();
            s.m_mutex.lock();

        } else {

            float ms = 100;
            if (s.getSourceSampleRate() > 0) {
                ms = float(m_ringBufferSize) / float(s.getSourceSampleRate()) * 1000.0;
            }

            if (s.m_playing) ms /= 10;

#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "AudioCallbackPlaySourceFillThread: waiting for " << ms << "ms..." << std::endl;
#endif

            s.m_condition.wait(&s.m_mutex, size_t(ms));
        }

#ifdef DEBUG_AUDIO_PLAY_SOURCE
        std::cout << "AudioCallbackPlaySourceFillThread: awoken" << std::endl;
#endif

        work = false;

        if (!s.getSourceSampleRate()) continue;

        bool playing = s.m_playing;

        if (playing && !previouslyPlaying) {
#ifdef DEBUG_AUDIO_PLAY_SOURCE
            std::cout << "AudioCallbackPlaySourceFillThread: playback state changed, resetting" << std::endl;
#endif
            for (size_t c = 0; c < s.getTargetChannelCount(); ++c) {
                RingBuffer<float> *rb = s.getReadRingBuffer(c);
                if (rb) rb->reset();
            }
        }
        previouslyPlaying = playing;

        work = s.fillBuffers();
    }

    s.m_mutex.unlock();
}
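
// The wait times above, roughly: with a 44100 Hz source (illustrative
// again), the full ring buffer represents 131071 / 44100 * 1000 ≈
// 2972 ms of audio, so an idle fill thread only wakes about every
// three seconds, while a playing one polls ten times as often (about
// every 297 ms) unless getSourceSamples() wakes it sooner through
// m_condition.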


#ifdef INCLUDE_MOCFILES
#include "AudioCallbackPlaySource.moc.cpp"
#endif