annotate transform/BeatDetectTransform.cpp @ 34:aaf73f7309f2

* Add "Export Audio File" option * Make note layer align in frequency with any spectrogram layer on the same view (if it's set to frequency mode) * Start to implement mouse editing for ranges of points by dragging the selection * First scrappy attempt at a vertical scale for time value layer
author Chris Cannam
date Mon, 27 Feb 2006 17:34:41 +0000
parents d86891498eef
children 39ae3dee27b9
rev   line source
Chris@0 1 /* -*- c-basic-offset: 4 -*- vi:set ts=8 sts=4 sw=4: */
Chris@0 2
Chris@0 3 /*
Chris@0 4 A waveform viewer and audio annotation editor.
Chris@2 5 Chris Cannam, Queen Mary University of London, 2005-2006
Chris@0 6
Chris@0 7 This is experimental software. Not for distribution.
Chris@0 8 */
Chris@0 9
Chris@0 10 #include "BeatDetectTransform.h"
Chris@0 11
Chris@0 12 #include "model/DenseTimeValueModel.h"
Chris@0 13 #include "model/SparseOneDimensionalModel.h"
Chris@0 14
Chris@0 15 #include <iostream>
Chris@0 16 #include "dsp/onsets/DetectionFunction.h"
Chris@0 17 #include "dsp/tempotracking/TempoTrack.h"
Chris@0 18
Chris@0 19
Chris@0 20 BeatDetectTransform::BeatDetectTransform(Model *inputModel) :
Chris@0 21 Transform(inputModel)
Chris@0 22 {
Chris@0 23 // Step resolution for the detection function in seconds
Chris@0 24 double stepSecs = 0.01161;
Chris@0 25
Chris@0 26 // Step resolution for the detection function in samples
Chris@0 27 size_t stepSize = (size_t)floor((double)inputModel->getSampleRate() *
Chris@0 28 stepSecs);
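// e.g. for a 44100 Hz input, floor(44100 * 0.01161) = 512 samples per step
// (0.01161 s looks to have been chosen to give a 512-sample hop at 44.1 kHz)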
Chris@0 29
Chris@0 30
Chris@0 31 // m_w->m_bdf->setResolution(stepSize);
Chris@0 32 // output->setResolution(stepSize);
Chris@0 33
Chris@0 34 std::cerr << "BeatDetectTransform::BeatDetectTransform: input sample rate "
           << inputModel->getSampleRate() << ", stepSecs " << stepSecs
           << ", stepSize " << stepSize << ", unrounded stepSize "
           << double(inputModel->getSampleRate()) * stepSecs
           << ", output sample rate " << inputModel->getSampleRate() / stepSize
           << ", unrounded output sample rate "
           << double(inputModel->getSampleRate()) / double(stepSize) << std::endl;
Chris@0 35
Chris@0 36 m_output = new SparseOneDimensionalModel(inputModel->getSampleRate(), 1);
Chris@0 37 }
Chris@0 38
Chris@0 39 BeatDetectTransform::~BeatDetectTransform()
Chris@0 40 {
Chris@0 41 // parent does it all
Chris@0 42 }
Chris@0 43
Chris@0 44 TransformName
Chris@0 45 BeatDetectTransform::getName()
Chris@0 46 {
Chris@0 47 return tr("Beats");
Chris@0 48 }
Chris@0 49
Chris@0 50 void
Chris@0 51 BeatDetectTransform::run()
Chris@0 52 {
Chris@0 53 SparseOneDimensionalModel *output = getOutput();
Chris@0 54 DenseTimeValueModel *input = getInput();
Chris@0 55 if (!input) return;
Chris@0 56
Chris@0 57 DFConfig config;
Chris@0 58
Chris@0 59 config.DFType = DF_COMPLEXSD;
Chris@0 60
Chris@0 61 // Step resolution for the detection function in seconds
Chris@0 62 config.stepSecs = 0.01161;
Chris@0 63
Chris@0 64 // Step resolution for the detection function in samples
Chris@0 65 config.stepSize = (unsigned int)floor((double)input->getSampleRate() *
Chris@0 66 config.stepSecs );
Chris@0 67
Chris@0 68 config.frameLength = 2 * config.stepSize;
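// i.e. each analysis frame is twice the hop, so successive frames overlap by
// 50% (1024-sample frames every 512 samples for 44.1 kHz input)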
Chris@0 69
Chris@0 70 unsigned int stepSize = config.stepSize;
Chris@0 71 unsigned int frameLength = config.frameLength;
Chris@0 72
Chris@0 73 // m_w->m_bdf->setResolution(stepSize);
Chris@0 74 output->setResolution(stepSize);
Chris@0 75
Chris@0 76 // Tempo Tracking Configuration Parameters
Chris@0 77 TTParams ttparams;
Chris@0 78
Chris@0 79 // Low Pass filter coefficients for detection function smoothing
Chris@0 80 double* aCoeffs = new double[3];
Chris@0 81 double* bCoeffs = new double[3];
Chris@0 82
Chris@0 83 aCoeffs[ 0 ] = 1;
Chris@0 84 aCoeffs[ 1 ] = -0.5949;
Chris@0 85 aCoeffs[ 2 ] = 0.2348;
Chris@0 86 bCoeffs[ 0 ] = 0.1600;
Chris@0 87 bCoeffs[ 1 ] = 0.3200;
Chris@0 88 bCoeffs[ 2 ] = 0.1600;
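// With LPOrd = 2 these presumably drive the usual direct-form difference
// equation inside TempoTrack:
//   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
// i.e. a gentle 2nd-order low-pass with roughly unity gain at DC
// (0.16 + 0.32 + 0.16 = 0.64 and 1 - 0.5949 + 0.2348 = 0.6399).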
Chris@0 89
Chris@0 90 ttparams.winLength = 512;
Chris@0 91 ttparams.lagLength = 128;
Chris@0 92 ttparams.LPOrd = 2;
Chris@0 93 ttparams.LPACoeffs = aCoeffs;
Chris@0 94 ttparams.LPBCoeffs = bCoeffs;
Chris@0 95 ttparams.alpha = 9;
Chris@0 96 ttparams.WinT.post = 8;
Chris@0 97 ttparams.WinT.pre = 7;
Chris@0 98
Chris@0 99 ////////////////////////////////////////////////////////////
Chris@0 100 // DetectionFunction
Chris@0 101 ////////////////////////////////////////////////////////////
Chris@0 102 // Instantiate and configure detection function object
Chris@0 103
Chris@0 104 DetectionFunction df(config);
Chris@0 105
Chris@0 106 size_t origin = input->getStartFrame();
Chris@0 107 size_t frameCount = input->getEndFrame() - origin;
Chris@0 108 size_t blocks = (frameCount / stepSize);
Chris@0 109 if (blocks * stepSize < frameCount) ++blocks;
Chris@0 110
Chris@0 111 double *buffer = new double[frameLength];
Chris@0 112
Chris@0 113 // DF output with causal extension
Chris@0 114 unsigned int clen = blocks + ttparams.winLength;
Chris@0 115 double *dfOutput = new double[clen];
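// The extra ttparams.winLength blocks are zero-filled in the loop below, so
// the tempo tracker always has a full window of (silent) data past the end
// of the real input.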
Chris@0 116
Chris@0 117 std::cerr << "Detecting beats at step size " << stepSize << "..." << std::endl;
Chris@0 118
Chris@0 119 for (size_t i = 0; i < clen; ++i) {
Chris@0 120
Chris@0 121 // std::cerr << "block " << i << "/" << clen << std::endl;
Chris@0 122 // std::cerr << ".";
Chris@0 123
Chris@0 124 if (i < blocks) {
Chris@0 125 size_t got = input->getValues(-1, //!!! needs to come from parent layer -- which is not supposed to be in scope at this point
Chris@0 126 origin + i * stepSize,
Chris@0 127 origin + i * stepSize + frameLength,
Chris@0 128 buffer);
Chris@0 129 while (got < frameLength) buffer[got++] = 0.0;
Chris@0 130 dfOutput[i] = df.process(buffer);
Chris@0 131 } else {
Chris@0 132 dfOutput[i] = 0.0;
Chris@0 133 }
Chris@0 134
Chris@0 135 // m_w->m_bdf->addPoint(SparseTimeValueModel::Point
Chris@0 136 // (i * stepSize, dfOutput[i],
Chris@0 137 // QString("%1").arg(dfOutput[i])));
Chris@0 138 // m_w->m_bdf->setCompletion(i * 99 / clen);
Chris@0 139 output->setCompletion(i * 99 / clen);
Chris@0 140
Chris@0 141 if (m_deleting) {
Chris@0 142 delete [] buffer;
Chris@0 143 delete [] dfOutput;
Chris@0 144 delete [] aCoeffs;
Chris@0 145 delete [] bCoeffs;
Chris@0 146 return;
Chris@0 147 }
Chris@0 148 }
Chris@0 149
Chris@0 150 // m_w->m_bdf->setCompletion(100);
Chris@0 151
Chris@0 152 // Tempo Track Object instantiation and configuration
Chris@0 153 TempoTrack tempoTracker(ttparams);
Chris@0 154
Chris@0 155 // Vector of detected onsets
Chris@0 156 std::vector<int> beats;
Chris@0 157
Chris@0 158 std::cerr << "Running tempo tracker..." << std::endl;
Chris@0 159
Chris@0 160 beats = tempoTracker.process(dfOutput, blocks);
Chris@0 161
Chris@0 162 delete [] buffer;
Chris@0 163 delete [] dfOutput;
Chris@0 164 delete [] aCoeffs;
Chris@0 165 delete [] bCoeffs;
Chris@0 166
Chris@0 167 for (size_t i = 0; i < beats.size(); ++i) {
Chris@0 168 // std::cerr << "Beat value " << beats[i] << ", multiplying out to " << beats[i] * stepSize << std::endl;
Chris@0 169 float bpm = 0.0;
Chris@0 170 int fdiff = 0;
Chris@0 171 if (i < beats.size() - 1) {
Chris@0 172 fdiff = (beats[i+1] - beats[i]) * stepSize;
Chris@0 173 // one beat is fdiff frames, so there are samplerate/fdiff bps,
Chris@0 174 // so 60*samplerate/fdiff bpm
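// e.g. fdiff = 22050 frames at a 44100 Hz sample rate gives
// 60 * 44100 / 22050 = 120 bpm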
Chris@0 175 if (fdiff > 0) {
Chris@0 176 bpm = (60.0 * input->getSampleRate()) / fdiff;
Chris@0 177 }
Chris@0 178 }
Chris@0 179 output->addPoint(SparseOneDimensionalModel::Point
Chris@0 180 (origin + beats[i] * stepSize, QString("%1").arg(bpm)));
Chris@0 181 if (m_deleting) return;
Chris@0 182 }
Chris@0 183
Chris@0 184 output->setCompletion(100);
Chris@0 185 }
Chris@0 186
Chris@0 187 DenseTimeValueModel *
Chris@0 188 BeatDetectTransform::getInput()
Chris@0 189 {
Chris@0 190 DenseTimeValueModel *dtvm =
Chris@0 191 dynamic_cast<DenseTimeValueModel *>(getInputModel());
Chris@0 192 if (!dtvm) {
Chris@0 193 std::cerr << "BeatDetectTransform::getInput: WARNING: Input model is not conformable to DenseTimeValueModel" << std::endl;
Chris@0 194 }
Chris@0 195 return dtvm;
Chris@0 196 }
Chris@0 197
Chris@0 198 SparseOneDimensionalModel *
Chris@0 199 BeatDetectTransform::getOutput()
Chris@0 200 {
Chris@0 201 return static_cast<SparseOneDimensionalModel *>(getOutputModel());
Chris@0 202 }
Chris@0 203
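A minimal sketch of how this transform might be driven from calling code, using only the methods defined above. It assumes run() is publicly callable and blocks until finished; if the Transform base class is a threaded worker, it would instead be started asynchronously, and the surrounding helper function here is purely hypothetical.

#include "BeatDetectTransform.h"
#include "model/DenseTimeValueModel.h"
#include "model/SparseOneDimensionalModel.h"

// Hypothetical calling code, not part of this changeset. "audio" stands in
// for whatever DenseTimeValueModel the application currently has loaded.
void detectBeats(DenseTimeValueModel *audio)
{
    BeatDetectTransform transform(audio);

    // Assumption: run() can be called synchronously here; in the application
    // the transform may instead be started on its own thread.
    transform.run();

    // Each point in the output marks one detected beat, labelled with the
    // local bpm estimate computed from the gap to the following beat.
    SparseOneDimensionalModel *beats = transform.getOutput();
    (void)beats; // hand off to a layer or exporter in real code
}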