Chris@43
|
1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
|
Chris@43
|
2
|
Chris@43
|
3 /*
|
Chris@43
|
4 Sonic Visualiser
|
Chris@43
|
5 An audio file viewer and annotation editor.
|
Chris@43
|
6 Centre for Digital Music, Queen Mary, University of London.
|
Chris@43
|
7 This file copyright 2006 Chris Cannam.
|
Chris@43
|
8
|
Chris@43
|
9 This program is free software; you can redistribute it and/or
|
Chris@43
|
10 modify it under the terms of the GNU General Public License as
|
Chris@43
|
11 published by the Free Software Foundation; either version 2 of the
|
Chris@43
|
12 License, or (at your option) any later version. See the file
|
Chris@43
|
13 COPYING included with this distribution for more information.
|
Chris@43
|
14 */
|
Chris@43
|
15
|
Chris@43
|
16 #include "AudioGenerator.h"
|
Chris@43
|
17
|
Chris@43
|
18 #include "base/TempDirectory.h"
|
Chris@43
|
19 #include "base/PlayParameters.h"
|
Chris@43
|
20 #include "base/PlayParameterRepository.h"
|
Chris@43
|
21 #include "base/Pitch.h"
|
Chris@43
|
22 #include "base/Exceptions.h"
|
Chris@43
|
23
|
Chris@43
|
24 #include "data/model/NoteModel.h"
|
Chris@43
|
25 #include "data/model/DenseTimeValueModel.h"
|
Chris@313
|
26 #include "data/model/SparseTimeValueModel.h"
|
Chris@43
|
27 #include "data/model/SparseOneDimensionalModel.h"
|
Chris@645
|
28 #include "base/NoteData.h"
|
Chris@43
|
29
|
Chris@307
|
30 #include "ClipMixer.h"
|
Chris@313
|
31 #include "ContinuousSynth.h"
|
Chris@307
|
32
|
Chris@43
|
33 #include <iostream>
|
Chris@167
|
34 #include <cmath>
|
Chris@43
|
35
|
Chris@43
|
36 #include <QDir>
|
Chris@43
|
37 #include <QFile>
|
Chris@43
|
38
|
Chris@436
|
// Number of frames processed per block by the clip-mixer and synth
// back-ends (see mixClipModel / mixContinuousSynthModel)
const sv_frame_t
AudioGenerator::m_processingBlockSize = 1024;

// Directory the bundled sample resources are copied into; set lazily
// by initialiseSampleDir(), empty until then (or if creation failed)
QString
AudioGenerator::m_sampleDir = "";
|
Chris@43
|
44
|
Chris@43
|
45 //#define DEBUG_AUDIO_GENERATOR 1
|
Chris@43
|
46
|
Chris@43
|
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(nullptr),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    // Make sure the sample directory is populated before any clip
    // mixer tries to load a clip from it
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever a model's play clip is
    // reassigned in the repository (see playClipIdChanged slot)
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(int, QString)),
            this,
            SLOT(playClipIdChanged(int, QString)));
}
|
Chris@43
|
63
|
Chris@43
|
64 AudioGenerator::~AudioGenerator()
|
Chris@43
|
65 {
|
Chris@177
|
66 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@596
|
67 cerr << "AudioGenerator::~AudioGenerator" << endl;
|
Chris@177
|
68 #endif
|
Chris@593
|
69
|
Chris@593
|
70 for (int i = 0; i < m_channelBufCount; ++i) {
|
Chris@593
|
71 delete[] m_channelBuffer[i];
|
Chris@593
|
72 }
|
Chris@593
|
73 delete[] m_channelBuffer;
|
Chris@43
|
74 }
|
Chris@43
|
75
|
Chris@108
|
76 void
|
Chris@108
|
77 AudioGenerator::initialiseSampleDir()
|
Chris@43
|
78 {
|
Chris@108
|
79 if (m_sampleDir != "") return;
|
Chris@108
|
80
|
Chris@108
|
81 try {
|
Chris@108
|
82 m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
|
Chris@598
|
83 } catch (const DirectoryCreationFailed &f) {
|
Chris@293
|
84 cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
|
Chris@108
|
85 << " Failed to create temporary sample directory"
|
Chris@293
|
86 << endl;
|
Chris@108
|
87 m_sampleDir = "";
|
Chris@108
|
88 return;
|
Chris@108
|
89 }
|
Chris@108
|
90
|
Chris@108
|
91 QDir sampleResourceDir(":/samples", "*.wav");
|
Chris@108
|
92
|
Chris@108
|
93 for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
|
Chris@108
|
94
|
Chris@108
|
95 QString fileName(sampleResourceDir[i]);
|
Chris@108
|
96 QFile file(sampleResourceDir.filePath(fileName));
|
Chris@151
|
97 QString target = QDir(m_sampleDir).filePath(fileName);
|
Chris@108
|
98
|
Chris@151
|
99 if (!file.copy(target)) {
|
Chris@293
|
100 cerr << "WARNING: AudioGenerator::getSampleDir: "
|
Chris@294
|
101 << "Unable to copy " << fileName
|
Chris@108
|
102 << " into temporary directory \""
|
Chris@293
|
103 << m_sampleDir << "\"" << endl;
|
Chris@151
|
104 } else {
|
Chris@151
|
105 QFile tf(target);
|
Chris@151
|
106 tf.setPermissions(tf.permissions() |
|
Chris@151
|
107 QFile::WriteOwner |
|
Chris@151
|
108 QFile::WriteUser);
|
Chris@108
|
109 }
|
Chris@43
|
110 }
|
Chris@43
|
111 }
|
Chris@43
|
112
|
Chris@43
|
// Register a model for playback. Returns true only if the model will
// actually be audible (playable, has a play back-end, and not muted).
bool
AudioGenerator::addModel(ModelId modelId)
{
    auto model = ModelById::get(modelId);
    if (!model) return false;
    if (!model->canPlay()) return false;

    if (m_sourceSampleRate == 0) {

        // First model establishes the playback sample rate
        m_sourceSampleRate = model->getSampleRate();

    } else {

        // Once a rate is established, only a dense (audio) model may
        // override it; such a model needs no mixer/synth of its own
        auto dtvm = std::dynamic_pointer_cast<DenseTimeValueModel>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters
        (modelId.untyped);

    if (!parameters) {
        SVCERR << "WARNING: Model with canPlay true is not known to PlayParameterRepository" << endl;
        return false;
    }

    bool willPlay = !parameters->isPlayMuted();

    // Sparse event-like models get a clip mixer
    if (usesClipMixer(modelId)) {
        ClipMixer *mixer = makeClipMixerFor(modelId);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[modelId] = mixer;
            return willPlay;
        }
    }

    // Curve (time-value) models get a continuous synth
    if (usesContinuousSynth(modelId)) {
        ContinuousSynth *synth = makeSynthFor(modelId);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[modelId] = synth;
            return willPlay;
        }
    }

    // Playable but no back-end could be created for it
    return false;
}
|
Chris@43
|
165
|
Chris@43
|
// Slot: a model's assigned play clip changed in the repository.
// Rebuilds the clip mixer for that model (if it has one) so that
// subsequent playback uses the new clip.
void
AudioGenerator::playClipIdChanged(int playableId, QString)
{
    /*!!!
    const Model *model = dynamic_cast<const Model *>(playable);
    if (!model) {
        cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
             << playable << " is not a supported model type"
             << endl;
        return;
    }
*/
    // The repository identifies the playable only by its untyped id;
    // wrap it back into a ModelId so we can consult our own maps
    ModelId modelId;
    modelId.untyped = playableId;

    // Nothing to do for models that don't use clip-based playback
    if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
        return;
    }

    // Build the replacement before taking the lock; swap and free the
    // old mixer under the lock
    ClipMixer *mixer = makeClipMixerFor(modelId);
    if (mixer) {
        QMutexLocker locker(&m_mutex);
        ClipMixer *oldMixer = m_clipMixerMap[modelId];
        m_clipMixerMap[modelId] = mixer;
        delete oldMixer;
    }
}
|
Chris@308
|
193
|
Chris@313
|
194 bool
|
Chris@682
|
195 AudioGenerator::usesClipMixer(ModelId modelId)
|
Chris@43
|
196 {
|
Chris@313
|
197 bool clip =
|
Chris@682
|
198 (ModelById::isa<SparseOneDimensionalModel>(modelId) ||
|
Chris@682
|
199 ModelById::isa<NoteModel>(modelId));
|
Chris@313
|
200 return clip;
|
Chris@43
|
201 }
|
Chris@43
|
202
|
Chris@313
|
203 bool
|
Chris@682
|
204 AudioGenerator::wantsQuieterClips(ModelId modelId)
|
Chris@349
|
205 {
|
Chris@349
|
206 // basically, anything that usually has sustain (like notes) or
|
Chris@349
|
207 // often has multiple sounds at once (like notes) wants to use a
|
Chris@349
|
208 // quieter level than simple click tracks
|
Chris@682
|
209 bool does = (ModelById::isa<NoteModel>(modelId));
|
Chris@349
|
210 return does;
|
Chris@349
|
211 }
|
Chris@349
|
212
|
Chris@349
|
213 bool
|
Chris@682
|
214 AudioGenerator::usesContinuousSynth(ModelId modelId)
|
Chris@43
|
215 {
|
Chris@682
|
216 bool cont = (ModelById::isa<SparseTimeValueModel>(modelId));
|
Chris@313
|
217 return cont;
|
Chris@313
|
218 }
|
Chris@313
|
219
|
Chris@307
|
220 ClipMixer *
|
Chris@682
|
221 AudioGenerator::makeClipMixerFor(ModelId modelId)
|
Chris@43
|
222 {
|
Chris@309
|
223 QString clipId;
|
Chris@43
|
224
|
Chris@43
|
225 PlayParameters *parameters =
|
Chris@682
|
226 PlayParameterRepository::getInstance()->getPlayParameters
|
Chris@682
|
227 (modelId.untyped);
|
Chris@43
|
228 if (parameters) {
|
Chris@309
|
229 clipId = parameters->getPlayClipId();
|
Chris@43
|
230 }
|
Chris@43
|
231
|
Chris@445
|
232 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@682
|
233 std::cerr << "AudioGenerator::makeClipMixerFor(" << modelId << "): sample id = " << clipId << std::endl;
|
Chris@445
|
234 #endif
|
Chris@276
|
235
|
Chris@309
|
236 if (clipId == "") {
|
Chris@682
|
237 SVDEBUG << "AudioGenerator::makeClipMixerFor(" << modelId << "): no sample, skipping" << endl;
|
Chris@636
|
238 return nullptr;
|
Chris@276
|
239 }
|
Chris@43
|
240
|
Chris@308
|
241 ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
|
Chris@308
|
242 m_sourceSampleRate,
|
Chris@308
|
243 m_processingBlockSize);
|
Chris@307
|
244
|
Chris@436
|
245 double clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0); // required
|
Chris@307
|
246
|
Chris@309
|
247 QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
|
Chris@307
|
248
|
Chris@682
|
249 double level = wantsQuieterClips(modelId) ? 0.5 : 1.0;
|
Chris@349
|
250 if (!mixer->loadClipData(clipPath, clipF0, level)) {
|
Chris@308
|
251 delete mixer;
|
Chris@636
|
252 return nullptr;
|
Chris@43
|
253 }
|
Chris@43
|
254
|
Chris@445
|
255 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@309
|
256 std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
|
Chris@445
|
257 #endif
|
Chris@43
|
258
|
Chris@308
|
259 return mixer;
|
Chris@308
|
260 }
|
Chris@43
|
261
|
Chris@313
|
262 ContinuousSynth *
|
Chris@682
|
263 AudioGenerator::makeSynthFor(ModelId)
|
Chris@313
|
264 {
|
Chris@313
|
265 ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
|
Chris@313
|
266 m_sourceSampleRate,
|
rmb456@323
|
267 m_processingBlockSize,
|
rmb456@323
|
268 m_waveType);
|
Chris@313
|
269
|
Chris@445
|
270 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@313
|
271 std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
|
Chris@445
|
272 #endif
|
Chris@313
|
273
|
Chris@313
|
274 return synth;
|
Chris@313
|
275 }
|
Chris@313
|
276
|
Chris@43
|
277 void
|
Chris@682
|
278 AudioGenerator::removeModel(ModelId modelId)
|
Chris@43
|
279 {
|
Chris@43
|
280 QMutexLocker locker(&m_mutex);
|
Chris@43
|
281
|
Chris@682
|
282 if (m_clipMixerMap.find(modelId) == m_clipMixerMap.end()) {
|
Chris@616
|
283 return;
|
Chris@616
|
284 }
|
Chris@43
|
285
|
Chris@682
|
286 ClipMixer *mixer = m_clipMixerMap[modelId];
|
Chris@682
|
287 m_clipMixerMap.erase(modelId);
|
Chris@308
|
288 delete mixer;
|
Chris@43
|
289 }
|
Chris@43
|
290
|
Chris@43
|
291 void
|
Chris@43
|
292 AudioGenerator::clearModels()
|
Chris@43
|
293 {
|
Chris@43
|
294 QMutexLocker locker(&m_mutex);
|
Chris@308
|
295
|
Chris@308
|
296 while (!m_clipMixerMap.empty()) {
|
Chris@308
|
297 ClipMixer *mixer = m_clipMixerMap.begin()->second;
|
Chris@595
|
298 m_clipMixerMap.erase(m_clipMixerMap.begin());
|
Chris@595
|
299 delete mixer;
|
Chris@43
|
300 }
|
Chris@43
|
301 }
|
Chris@43
|
302
|
Chris@43
|
303 void
|
Chris@43
|
304 AudioGenerator::reset()
|
Chris@43
|
305 {
|
Chris@43
|
306 QMutexLocker locker(&m_mutex);
|
Chris@308
|
307
|
Chris@445
|
308 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@397
|
309 cerr << "AudioGenerator::reset()" << endl;
|
Chris@445
|
310 #endif
|
Chris@397
|
311
|
Chris@616
|
312 for (ClipMixerMap::iterator i = m_clipMixerMap.begin();
|
Chris@616
|
313 i != m_clipMixerMap.end(); ++i) {
|
Chris@595
|
314 if (i->second) {
|
Chris@595
|
315 i->second->reset();
|
Chris@595
|
316 }
|
Chris@43
|
317 }
|
Chris@43
|
318
|
Chris@43
|
319 m_noteOffs.clear();
|
Chris@43
|
320 }
|
Chris@43
|
321
|
Chris@43
|
322 void
|
Chris@366
|
323 AudioGenerator::setTargetChannelCount(int targetChannelCount)
|
Chris@43
|
324 {
|
Chris@43
|
325 if (m_targetChannelCount == targetChannelCount) return;
|
Chris@43
|
326
|
Chris@233
|
327 // SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;
|
Chris@43
|
328
|
Chris@43
|
329 QMutexLocker locker(&m_mutex);
|
Chris@43
|
330 m_targetChannelCount = targetChannelCount;
|
Chris@43
|
331
|
Chris@308
|
332 for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
|
Chris@595
|
333 if (i->second) i->second->setChannelCount(targetChannelCount);
|
Chris@43
|
334 }
|
Chris@43
|
335 }
|
Chris@43
|
336
|
Chris@436
|
337 sv_frame_t
|
Chris@43
|
338 AudioGenerator::getBlockSize() const
|
Chris@43
|
339 {
|
Chris@305
|
340 return m_processingBlockSize;
|
Chris@43
|
341 }
|
Chris@43
|
342
|
Chris@43
|
343 void
|
Chris@682
|
344 AudioGenerator::setSoloModelSet(std::set<ModelId> s)
|
Chris@43
|
345 {
|
Chris@43
|
346 QMutexLocker locker(&m_mutex);
|
Chris@43
|
347
|
Chris@43
|
348 m_soloModelSet = s;
|
Chris@43
|
349 m_soloing = true;
|
Chris@43
|
350 }
|
Chris@43
|
351
|
Chris@43
|
352 void
|
Chris@43
|
353 AudioGenerator::clearSoloModelSet()
|
Chris@43
|
354 {
|
Chris@43
|
355 QMutexLocker locker(&m_mutex);
|
Chris@43
|
356
|
Chris@43
|
357 m_soloModelSet.clear();
|
Chris@43
|
358 m_soloing = false;
|
Chris@43
|
359 }
|
Chris@43
|
360
|
Chris@436
|
// Mix frameCount frames of the given model, starting at startFrame,
// into the supplied per-channel buffers (additively), applying the
// model's gain/pan play parameters and optional fade-in/out. Returns
// the number of frames mixed; muted/solo-excluded/unknown models are
// reported as fully "handled" by returning frameCount unchanged.
sv_frame_t
AudioGenerator::mixModel(ModelId modelId,
                         sv_frame_t startFrame, sv_frame_t frameCount,
                         float **buffer,
                         sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    auto model = ModelById::get(modelId);
    if (!model || !model->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters
        (modelId.untyped);
    if (!parameters) return frameCount;

    // A muted model contributes nothing but still counts as handled
    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << modelId << "): muted" << endl;
#endif
        return frameCount;
    }

    // In solo mode, skip anything not in the solo set
    if (m_soloing) {
        if (m_soloModelSet.find(modelId) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << modelId << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dispatch on model type: audio data, clip events, or synth curve.
    // Note: only the dense path honours fadeIn/fadeOut.
    if (std::dynamic_pointer_cast<DenseTimeValueModel>(model)) {
        return mixDenseTimeValueModel(modelId, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(modelId)) {
        return mixClipModel(modelId, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(modelId)) {
        return mixContinuousSynthModel(modelId, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << modelId << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
|
Chris@43
|
421
|
Chris@436
|
// Mix audio sample data from a dense model into the output buffers,
// applying gain, a simple two-channel pan law, and linear fade-in/out
// ramps. The fades are centred on the nominal start/end: reading
// begins fadeIn/2 frames early and continues fadeOut/2 frames late.
// Returns the number of frames actually obtained from the model.
sv_frame_t
AudioGenerator::mixDenseTimeValueModel(ModelId modelId,
                                       sv_frame_t startFrame, sv_frame_t frames,
                                       float **buffer, float gain, float pan,
                                       sv_frame_t fadeIn, sv_frame_t fadeOut)
{
    sv_frame_t maxFrames = frames + std::max(fadeIn, fadeOut);

    auto dtvm = ModelById::getAs<DenseTimeValueModel>(modelId);
    if (!dtvm) return 0;

    int modelChannels = dtvm->getChannelCount();

    // (Re)allocate the scratch buffers if this request needs more
    // space or channels than previous ones; they are retained between
    // calls and freed in the destructor
    if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {

        for (int c = 0; c < m_channelBufCount; ++c) {
            delete[] m_channelBuffer[c];
        }

        delete[] m_channelBuffer;
        m_channelBuffer = new float *[modelChannels];

        for (int c = 0; c < modelChannels; ++c) {
            m_channelBuffer[c] = new float[maxFrames];
        }

        m_channelBufCount = modelChannels;
        m_channelBufSiz = maxFrames;
    }

    sv_frame_t got = 0;

    if (startFrame >= fadeIn/2) {

        // Normal case: we can read back far enough to cover the
        // leading half of the fade-in
        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame - fadeIn/2,
                                              frames + fadeOut/2 + fadeIn/2);

        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c]);
        }

        // NOTE(review): assumes data is non-empty (modelChannels >= 1)
        got = data[0].size();

    } else {
        // Near the start of the model: pad the unreadable prefix and
        // offset the copied data by the shortfall
        sv_frame_t missing = fadeIn/2 - startFrame;

        if (missing > 0) {
            cerr << "note: channelBufSiz = " << m_channelBufSiz
                 << ", frames + fadeOut/2 = " << frames + fadeOut/2
                 << ", startFrame = " << startFrame
                 << ", missing = " << missing << endl;
        }

        auto data = dtvm->getMultiChannelData(0, modelChannels - 1,
                                              startFrame,
                                              frames + fadeOut/2);
        for (int c = 0; c < modelChannels; ++c) {
            copy(data[c].begin(), data[c].end(), m_channelBuffer[c] + missing);
        }

        got = data[0].size() + missing;
    }

    for (int c = 0; c < m_targetChannelCount; ++c) {

        // Cycle model channels across target channels (e.g. mono
        // source duplicated to stereo output)
        int sourceChannel = (c % modelChannels);

        // SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;

        // Constant-ish pan: attenuate the channel opposite the pan
        // direction (only meaningful for a two-channel target)
        float channelGain = gain;
        if (pan != 0.0) {
            if (c == 0) {
                if (pan > 0.0) channelGain *= 1.0f - pan;
            } else {
                if (pan < 0.0) channelGain *= pan + 1.0f;
            }
        }

        // First half of the fade-in lands *before* the nominal start:
        // write it into the frames preceding buffer[c]
        // NOTE(review): assumes the caller provides valid writable
        // space at buffer[c] - fadeIn/2 -- confirm against callers
        for (sv_frame_t i = 0; i < fadeIn/2; ++i) {
            float *back = buffer[c];
            back -= fadeIn/2;
            back[i] +=
                (channelGain * m_channelBuffer[sourceChannel][i] * float(i))
                / float(fadeIn);
        }

        // Main body plus trailing half of the fade-out; samples past
        // what the model returned are treated as silence
        for (sv_frame_t i = 0; i < frames + fadeOut/2; ++i) {
            float mult = channelGain;
            if (i < fadeIn/2) {
                mult = (mult * float(i)) / float(fadeIn);
            }
            if (i > frames - fadeOut/2) {
                mult = (mult * float((frames + fadeOut/2) - i)) / float(fadeOut);
            }
            float val = m_channelBuffer[sourceChannel][i];
            if (i >= got) val = 0.f;
            buffer[c][i] += mult * val;
        }
    }

    return got;
}
|
Chris@43
|
525
|
Chris@436
|
526 sv_frame_t
|
Chris@682
|
527 AudioGenerator::mixClipModel(ModelId modelId,
|
Chris@436
|
528 sv_frame_t startFrame, sv_frame_t frames,
|
Chris@313
|
529 float **buffer, float gain, float pan)
|
Chris@43
|
530 {
|
Chris@682
|
531 ClipMixer *clipMixer = m_clipMixerMap[modelId];
|
Chris@308
|
532 if (!clipMixer) return 0;
|
Chris@43
|
533
|
Chris@682
|
534 auto exportable = ModelById::getAs<NoteExportable>(modelId);
|
Chris@682
|
535
|
Chris@436
|
536 int blocks = int(frames / m_processingBlockSize);
|
Chris@43
|
537
|
Chris@313
|
538 //!!! todo: the below -- it matters
|
Chris@313
|
539
|
Chris@43
|
540 //!!! hang on -- the fact that the audio callback play source's
|
Chris@43
|
541 //buffer is a multiple of the plugin's buffer size doesn't mean
|
Chris@43
|
542 //that we always get called for a multiple of it here (because it
|
Chris@43
|
543 //also depends on the JACK block size). how should we ensure that
|
Chris@43
|
544 //all models write the same amount in to the mix, and that we
|
Chris@43
|
545 //always have a multiple of the plugin buffer size? I guess this
|
Chris@43
|
546 //class has to be queryable for the plugin buffer size & the
|
Chris@43
|
547 //callback play source has to use that as a multiple for all the
|
Chris@43
|
548 //calls to mixModel
|
Chris@43
|
549
|
Chris@436
|
550 sv_frame_t got = blocks * m_processingBlockSize;
|
Chris@43
|
551
|
Chris@43
|
552 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@442
|
553 cout << "mixModel [clip]: start " << startFrame << ", frames " << frames
|
Chris@442
|
554 << ", blocks " << blocks << ", have " << m_noteOffs.size()
|
Chris@442
|
555 << " note-offs" << endl;
|
Chris@43
|
556 #endif
|
Chris@43
|
557
|
Chris@308
|
558 ClipMixer::NoteStart on;
|
Chris@308
|
559 ClipMixer::NoteEnd off;
|
Chris@43
|
560
|
Chris@682
|
561 NoteOffSet ¬eOffs = m_noteOffs[modelId];
|
Chris@43
|
562
|
Chris@308
|
563 float **bufferIndexes = new float *[m_targetChannelCount];
|
Chris@308
|
564
|
Chris@646
|
565 //!!! + for first block, prime with notes already active
|
Chris@646
|
566
|
Chris@366
|
567 for (int i = 0; i < blocks; ++i) {
|
Chris@43
|
568
|
Chris@595
|
569 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
|
Chris@43
|
570
|
Chris@299
|
571 NoteList notes;
|
Chris@299
|
572 if (exportable) {
|
Chris@646
|
573 notes = exportable->getNotesStartingWithin(reqStart,
|
Chris@646
|
574 m_processingBlockSize);
|
Chris@299
|
575 }
|
Chris@43
|
576
|
Chris@308
|
577 std::vector<ClipMixer::NoteStart> starts;
|
Chris@308
|
578 std::vector<ClipMixer::NoteEnd> ends;
|
Chris@43
|
579
|
Chris@615
|
580 while (noteOffs.begin() != noteOffs.end() &&
|
Chris@615
|
581 noteOffs.begin()->onFrame > reqStart) {
|
Chris@615
|
582
|
Chris@615
|
583 // We must have jumped back in time, as there is a
|
Chris@615
|
584 // note-off pending for a note that hasn't begun yet. Emit
|
Chris@615
|
585 // the note-off now and discard
|
Chris@615
|
586
|
Chris@615
|
587 off.frameOffset = 0;
|
Chris@615
|
588 off.frequency = noteOffs.begin()->frequency;
|
Chris@615
|
589
|
Chris@615
|
590 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@615
|
591 cerr << "mixModel [clip]: adding rewind-caused note-off at frame offset 0 frequency " << off.frequency << endl;
|
Chris@615
|
592 #endif
|
Chris@615
|
593
|
Chris@615
|
594 ends.push_back(off);
|
Chris@615
|
595 noteOffs.erase(noteOffs.begin());
|
Chris@615
|
596 }
|
Chris@615
|
597
|
Chris@595
|
598 for (NoteList::const_iterator ni = notes.begin();
|
Chris@275
|
599 ni != notes.end(); ++ni) {
|
Chris@43
|
600
|
Chris@595
|
601 sv_frame_t noteFrame = ni->start;
|
Chris@596
|
602 sv_frame_t noteDuration = ni->duration;
|
Chris@43
|
603
|
Chris@595
|
604 if (noteFrame < reqStart ||
|
Chris@596
|
605 noteFrame >= reqStart + m_processingBlockSize) {
|
Chris@596
|
606 continue;
|
Chris@596
|
607 }
|
Chris@596
|
608
|
Chris@596
|
609 if (noteDuration == 0) {
|
Chris@596
|
610 // If we have a note-off and a note-on with the same
|
Chris@596
|
611 // time, then the note-off will be assumed (in the
|
Chris@596
|
612 // logic below that deals with two-point note-on/off
|
Chris@596
|
613 // events) to be switching off an earlier note before
|
Chris@596
|
614 // this one begins -- that's necessary in order to
|
Chris@596
|
615 // support adjoining notes of equal pitch. But it does
|
Chris@596
|
616 // mean we have to explicitly ignore zero-duration
|
Chris@596
|
617 // notes, otherwise they'll be played without end
|
Chris@596
|
618 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@596
|
619 cerr << "mixModel [clip]: zero-duration note found at frame " << noteFrame << ", skipping it" << endl;
|
Chris@596
|
620 #endif
|
Chris@596
|
621 continue;
|
Chris@596
|
622 }
|
Chris@43
|
623
|
Chris@595
|
624 while (noteOffs.begin() != noteOffs.end() &&
|
Chris@615
|
625 noteOffs.begin()->offFrame <= noteFrame) {
|
Chris@43
|
626
|
Chris@615
|
627 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
|
Chris@308
|
628 if (eventFrame < reqStart) eventFrame = reqStart;
|
Chris@43
|
629
|
Chris@308
|
630 off.frameOffset = eventFrame - reqStart;
|
Chris@308
|
631 off.frequency = noteOffs.begin()->frequency;
|
Chris@43
|
632
|
Chris@43
|
633 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@595
|
634 cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
|
Chris@43
|
635 #endif
|
Chris@43
|
636
|
Chris@308
|
637 ends.push_back(off);
|
Chris@595
|
638 noteOffs.erase(noteOffs.begin());
|
Chris@595
|
639 }
|
Chris@43
|
640
|
Chris@308
|
641 on.frameOffset = noteFrame - reqStart;
|
Chris@308
|
642 on.frequency = ni->getFrequency();
|
Chris@436
|
643 on.level = float(ni->velocity) / 127.0f;
|
Chris@308
|
644 on.pan = pan;
|
Chris@43
|
645
|
Chris@43
|
646 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@595
|
647 cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
|
Chris@43
|
648 #endif
|
Chris@595
|
649
|
Chris@308
|
650 starts.push_back(on);
|
Chris@595
|
651 noteOffs.insert
|
Chris@615
|
652 (NoteOff(on.frequency, noteFrame + noteDuration, noteFrame));
|
Chris@595
|
653 }
|
Chris@43
|
654
|
Chris@595
|
655 while (noteOffs.begin() != noteOffs.end() &&
|
Chris@615
|
656 noteOffs.begin()->offFrame <=
|
Chris@615
|
657 reqStart + m_processingBlockSize) {
|
Chris@43
|
658
|
Chris@615
|
659 sv_frame_t eventFrame = noteOffs.begin()->offFrame;
|
Chris@308
|
660 if (eventFrame < reqStart) eventFrame = reqStart;
|
Chris@43
|
661
|
Chris@308
|
662 off.frameOffset = eventFrame - reqStart;
|
Chris@308
|
663 off.frequency = noteOffs.begin()->frequency;
|
Chris@43
|
664
|
Chris@43
|
665 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@313
|
666 cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
|
Chris@43
|
667 #endif
|
Chris@43
|
668
|
Chris@308
|
669 ends.push_back(off);
|
Chris@308
|
670 noteOffs.erase(noteOffs.begin());
|
Chris@595
|
671 }
|
Chris@43
|
672
|
Chris@595
|
673 for (int c = 0; c < m_targetChannelCount; ++c) {
|
Chris@308
|
674 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
|
Chris@308
|
675 }
|
Chris@43
|
676
|
Chris@308
|
677 clipMixer->mix(bufferIndexes, gain, starts, ends);
|
Chris@308
|
678 }
|
Chris@43
|
679
|
Chris@308
|
680 delete[] bufferIndexes;
|
Chris@43
|
681
|
Chris@43
|
682 return got;
|
Chris@43
|
683 }
|
Chris@313
|
684
|
Chris@436
|
685 sv_frame_t
|
Chris@682
|
686 AudioGenerator::mixContinuousSynthModel(ModelId modelId,
|
Chris@436
|
687 sv_frame_t startFrame,
|
Chris@436
|
688 sv_frame_t frames,
|
Chris@313
|
689 float **buffer,
|
Chris@313
|
690 float gain,
|
Chris@313
|
691 float pan)
|
Chris@313
|
692 {
|
Chris@682
|
693 ContinuousSynth *synth = m_continuousSynthMap[modelId];
|
Chris@313
|
694 if (!synth) return 0;
|
Chris@313
|
695
|
Chris@313
|
696 // only type we support here at the moment
|
Chris@682
|
697 auto stvm = ModelById::getAs<SparseTimeValueModel>(modelId);
|
Chris@682
|
698 if (!stvm) return 0;
|
Chris@313
|
699 if (stvm->getScaleUnits() != "Hz") return 0;
|
Chris@313
|
700
|
Chris@436
|
701 int blocks = int(frames / m_processingBlockSize);
|
Chris@313
|
702
|
Chris@313
|
703 //!!! todo: see comment in mixClipModel
|
Chris@313
|
704
|
Chris@436
|
705 sv_frame_t got = blocks * m_processingBlockSize;
|
Chris@313
|
706
|
Chris@313
|
707 #ifdef DEBUG_AUDIO_GENERATOR
|
Chris@313
|
708 cout << "mixModel [synth]: frames " << frames
|
Chris@595
|
709 << ", blocks " << blocks << endl;
|
Chris@313
|
710 #endif
|
Chris@313
|
711
|
Chris@313
|
712 float **bufferIndexes = new float *[m_targetChannelCount];
|
Chris@313
|
713
|
Chris@366
|
714 for (int i = 0; i < blocks; ++i) {
|
Chris@313
|
715
|
Chris@595
|
716 sv_frame_t reqStart = startFrame + i * m_processingBlockSize;
|
Chris@313
|
717
|
Chris@595
|
718 for (int c = 0; c < m_targetChannelCount; ++c) {
|
Chris@313
|
719 bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
|
Chris@313
|
720 }
|
Chris@313
|
721
|
Chris@649
|
722 EventVector points =
|
Chris@649
|
723 stvm->getEventsStartingWithin(reqStart, m_processingBlockSize);
|
Chris@313
|
724
|
Chris@313
|
725 // by default, repeat last frequency
|
Chris@313
|
726 float f0 = 0.f;
|
Chris@313
|
727
|
Chris@649
|
728 // go straight to the last freq in this range
|
Chris@649
|
729 if (!points.empty()) {
|
Chris@649
|
730 f0 = points.rbegin()->getValue();
|
Chris@313
|
731 }
|
Chris@313
|
732
|
Chris@649
|
733 // if there is no such frequency and the next point is further
|
Chris@314
|
734 // away than twice the model resolution, go silent (same
|
Chris@314
|
735 // criterion TimeValueLayer uses for ending a discrete curve
|
Chris@314
|
736 // segment)
|
Chris@314
|
737 if (f0 == 0.f) {
|
Chris@650
|
738 Event nextP;
|
Chris@650
|
739 if (!stvm->getNearestEventMatching(reqStart + m_processingBlockSize,
|
Chris@650
|
740 [](Event) { return true; },
|
Chris@650
|
741 EventSeries::Forward,
|
Chris@650
|
742 nextP) ||
|
Chris@650
|
743 nextP.getFrame() > reqStart + 2 * stvm->getResolution()) {
|
Chris@314
|
744 f0 = -1.f;
|
Chris@314
|
745 }
|
Chris@314
|
746 }
|
Chris@650
|
747
|
Chris@315
|
748 // cerr << "f0 = " << f0 << endl;
|
Chris@313
|
749
|
Chris@313
|
750 synth->mix(bufferIndexes,
|
Chris@313
|
751 gain,
|
Chris@313
|
752 pan,
|
Chris@313
|
753 f0);
|
Chris@313
|
754 }
|
Chris@313
|
755
|
Chris@313
|
756 delete[] bufferIndexes;
|
Chris@313
|
757
|
Chris@313
|
758 return got;
|
Chris@313
|
759 }
|
Chris@313
|
760
|