annotate dsp/rateconversion/Resampler.cpp @ 372:d464286c007b

Fixes to latency and initial phase calculations (+ explanation)
author Chris Cannam <c.cannam@qmul.ac.uk>
date Thu, 17 Oct 2013 22:12:36 +0100
parents 33e9e964443c
children 9db2712b3ce4
rev   line source
c@362 1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
c@362 2
c@362 3 #include "Resampler.h"
c@362 4
c@362 5 #include "qm-dsp/maths/MathUtilities.h"
c@362 6 #include "qm-dsp/base/KaiserWindow.h"
c@362 7 #include "qm-dsp/base/SincWindow.h"
c@371 8 #include "qm-dsp/thread/Thread.h"
c@362 9
c@362 10 #include <iostream>
c@363 11 #include <vector>
c@370 12 #include <map>
c@372 13 #include <cassert>
#include <cmath>
c@363 14
c@363 15 using std::vector;
c@370 16 using std::map;
c@362 17
c@366 18 //#define DEBUG_RESAMPLER 1
c@366 19
c@362 20 Resampler::Resampler(int sourceRate, int targetRate) :
c@362 21 m_sourceRate(sourceRate),
c@362 22 m_targetRate(targetRate)
c@362 23 {
c@362 24 initialise();
c@362 25 }
c@362 26
c@362 27 Resampler::~Resampler()
c@362 28 {
c@362 29 delete[] m_phaseData;
c@362 30 }
c@362 31
c@371 32 // peakToPole -> length -> beta -> window
c@371 33 static map<int, map<int, map<double, vector<double> > > >
c@371 34 knownFilters;
c@371 35
c@371 36 static Mutex
c@371 37 knownFilterMutex;
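
// Building a filter is relatively expensive, so completed filters are
// cached in the nested map above, keyed by peakToPole, then filter
// length, then Kaiser beta, and shared between Resampler instances
// that need the same parameters; knownFilterMutex serialises access
// to the cache from multiple threads.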
c@371 38
c@362 39 void
c@362 40 Resampler::initialise()
c@362 41 {
c@362 42 int higher = std::max(m_sourceRate, m_targetRate);
c@362 43 int lower = std::min(m_sourceRate, m_targetRate);
c@362 44
c@362 45 m_gcd = MathUtilities::gcd(lower, higher);
c@362 46
c@362 47 int peakToPole = higher / m_gcd;
c@362 48
c@362 49 KaiserWindow::Parameters params =
c@362 50 KaiserWindow::parametersForBandwidth(100, 0.02, peakToPole);
c@362 51
c@362 52 params.length =
c@362 53 (params.length % 2 == 0 ? params.length + 1 : params.length);
c@362 54
c@372 55 params.length =
c@372 56 (params.length > 200001 ? 200001 : params.length);
c@372 57
c@362 58 m_filterLength = params.length;
c@370 59
c@371 60 vector<double> filter;
c@371 61 knownFilterMutex.lock();
c@362 62
c@371 63 if (knownFilters[peakToPole][m_filterLength].find(params.beta) ==
c@371 64 knownFilters[peakToPole][m_filterLength].end()) {
c@371 65
c@371 66 KaiserWindow kw(params);
c@371 67 SincWindow sw(m_filterLength, peakToPole * 2);
c@371 68
c@371 69 filter = vector<double>(m_filterLength, 1.0);
c@371 71 sw.cut(filter.data());
c@371 72 kw.cut(filter.data());
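// The two cut() calls multiply their windows into the all-ones buffer
// in place, leaving the product of a sinc function (zero crossings
// every peakToPole samples, i.e. a low-pass cutoff at the Nyquist
// frequency of the lower rate) and a Kaiser window: a windowed-sinc
// anti-aliasing / anti-imaging filter.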
c@372 73 /*
c@372 74 std::cerr << "sinc for " << params.length << ", " << params.beta
c@372 75 << ": ";
c@372 76 for (int i = 0; i < 10; ++i) {
c@372 77 std::cerr << sw.getWindow()[i] << " ";
c@372 78 }
c@372 79 std::cerr << std::endl;
c@371 80
c@372 81 std::cerr << "kaiser for " << params.length << ", " << params.beta
c@372 82 << ": ";
c@372 83 for (int i = 0; i < 10; ++i) {
c@372 84 std::cerr << kw.getWindow()[i] << " ";
c@372 85 }
c@372 86 std::cerr << std::endl;
c@372 87
c@372 88 std::cerr << "filter for " << params.length << ", " << params.beta
c@372 89 << ": ";
c@372 90 for (int i = 0; i < 10; ++i) {
c@372 91 std::cerr << filter[i] << " ";
c@372 92 }
c@372 93 std::cerr << std::endl;
c@372 94 */
c@371 95 knownFilters[peakToPole][m_filterLength][params.beta] = filter;
c@371 96 }
c@371 97
c@371 98 filter = knownFilters[peakToPole][m_filterLength][params.beta];
c@371 99 knownFilterMutex.unlock();
c@362 100
c@362 101 int inputSpacing = m_targetRate / m_gcd;
c@362 102 int outputSpacing = m_sourceRate / m_gcd;
c@362 103
c@366 104 #ifdef DEBUG_RESAMPLER
c@366 105 std::cerr << "resample " << m_sourceRate << " -> " << m_targetRate
c@366 106 << ": inputSpacing " << inputSpacing << ", outputSpacing "
c@366 107 << outputSpacing << ": filter length " << m_filterLength
c@366 108 << std::endl;
c@366 109 #endif
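
// For example, resampling 44100 Hz to 48000 Hz gives gcd = 300, hence
// peakToPole = 48000 / 300 = 160, inputSpacing = 160 and
// outputSpacing = 147: source samples fall on every 160th filter tap
// and target samples on every 147th.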
c@362 110
c@372 111 // Now we have a filter of (odd) length flen in which the lower
c@372 112 // sample rate corresponds to every n'th point and the higher rate
c@372 113 // to every m'th, where n and m are the higher and lower rates divided
c@372 114 // by their gcd respectively. So if x coordinates are on the same
c@372 115 // scale as our filter resolution, then source sample i is at i *
c@372 116 // (targetRate / gcd) and target sample j is at j * (sourceRate /
c@372 117 // gcd).
c@372 118
c@372 119 // To reconstruct a single target sample, we want a buffer (real
c@372 120 // or virtual) of flen values formed of source samples spaced at
c@372 121 // intervals of (targetRate / gcd), 3 in the example below. This
c@372 122 // is initially formed with the first sample at the filter peak.
c@372 123 //
c@372 124 // 0 0 0 0 a 0 0 b 0
c@372 125 //
c@372 126 // and of course we have our filter
c@372 127 //
c@372 128 // f1 f2 f3 f4 f5 f6 f7 f8 f9
c@372 129 //
c@372 130 // We take the sum of products of non-zero values from this buffer
c@372 131 // with corresponding values in the filter
c@372 132 //
c@372 133 // a * f5 + b * f8
c@372 134 //
c@372 135 // Then we drop (sourceRate / gcd) values, 4 in our example,
c@372 136 // from the start of the buffer and fill until it has flen values
c@372 137 // again
c@372 138 //
c@372 139 // a 0 0 b 0 0 c 0 0
c@372 140 //
c@372 141 // repeat to reconstruct the next target sample
c@372 142 //
c@372 143 // a * f1 + b * f4 + c * f7
c@372 144 //
c@372 145 // and so on.
c@372 146 //
c@372 147 // Above I said the buffer could be "real or virtual" -- ours is
c@372 148 // virtual. We don't actually store all the zero spacing values,
c@372 149 // except for padding at the start; normally we store only the
c@372 150 // values that actually came from the source stream, along with a
c@372 151 // phase value that tells us how many virtual zeroes there are at
c@372 152 // the start of the virtual buffer. So the two examples above are
c@372 153 //
c@372 154 // 0 a b [ with phase 1 ]
c@372 155 // a b c [ with phase 0 ]
c@372 156 //
c@372 157 // Having thus broken down the buffer so that only the elements we
c@372 158 // need to multiply are present, we can also unzip the filter into
c@372 159 // every-nth-element subsets at each phase, allowing us to do the
c@372 160 // filter multiplication as a simple vector multiply. That is, rather
c@372 161 // than store
c@372 162 //
c@372 163 // f1 f2 f3 f4 f5 f6 f7 f8 f9
c@372 164 //
c@372 165 // we store separately
c@372 166 //
c@372 167 // f1 f4 f7
c@372 168 // f2 f5 f8
c@372 169 // f3 f6 f9
c@372 170 //
c@372 171 // Each time we complete a multiply-and-sum, we need to work out
c@372 172 // how many (real) samples to drop from the start of our buffer,
c@372 173 // and how many to add at the end of it for the next multiply. We
c@372 174 // know we want to drop enough real samples to move along by one
c@372 175 // computed output sample, which is our outputSpacing number of
c@372 176 // virtual buffer samples. Depending on the relationship between
c@372 177 // input and output spacings, this may mean dropping several real
c@372 178 // samples, one real sample, or none at all (and simply moving to
c@372 179 // a different "phase").
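//
// For the example spacings above (inputSpacing 3, outputSpacing 4,
// flen 9), the loop below produces three phase records:
//
// phase 0: filter f1 f4 f7, drop 2, next phase 2
// phase 1: filter f2 f5 f8, drop 1, next phase 0
// phase 2: filter f3 f6 f9, drop 1, next phase 1
//
// so the two reconstruction steps shown earlier use phase 1 (stored
// buffer "0 a b", after which one real sample is dropped) and then
// phase 0 (stored buffer "a b c").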
c@372 180
c@362 181 m_phaseData = new Phase[inputSpacing];
c@362 182
c@362 183 for (int phase = 0; phase < inputSpacing; ++phase) {
c@362 184
c@362 185 Phase p;
c@362 186
c@362 187 p.nextPhase = phase - outputSpacing;
c@362 188 while (p.nextPhase < 0) p.nextPhase += inputSpacing;
c@362 189 p.nextPhase %= inputSpacing;
c@362 190
c@366 191 p.drop = int(ceil(std::max(0.0, double(outputSpacing - phase))
c@366 192 / inputSpacing));
c@362 193
c@366 194 int filtZipLength = int(ceil(double(m_filterLength - phase)
c@366 195 / inputSpacing));
c@372 196
c@362 197 for (int i = 0; i < filtZipLength; ++i) {
c@362 198 p.filter.push_back(filter[i * inputSpacing + phase]);
c@362 199 }
c@362 200
c@362 201 m_phaseData[phase] = p;
c@362 202 }
c@362 203
c@362 204 // The May implementation of this uses a pull model -- we ask the
c@362 205 // resampler for a certain number of output samples, and it asks
c@362 206 // its source stream for as many as it needs to calculate
c@362 207 // those. This means (among other things) that the source stream
c@362 208 // can be asked for enough samples up-front to fill the buffer
c@362 209 // before the first output sample is generated.
c@362 210 //
c@362 211 // In this implementation we're using a push model in which a
c@362 212 // certain number of source samples is provided and we're asked
c@362 213 // for as many output samples as that makes available. But we
c@362 214 // can't return any samples from the beginning until half the
c@362 215 // filter length has been provided as input. This means we must
c@362 216 // either return a very variable number of samples (none at all
c@362 217 // until the filter fills, then half the filter length at once) or
c@362 218 // else have a lengthy declared latency on the output. We do the
c@362 219 // latter. (What do other implementations do?)
c@362 220
c@372 221 int centreToEnd = (m_filterLength/2) + 1; // from centre of filter
c@372 222 // to first sample after
c@372 223 // filter end
c@372 224
c@372 225 // We want to make sure the first "real" sample will eventually be
c@372 226 // aligned with the centre sample in the filter (it's tidier, and
c@372 227 // easier to do diagnostic calculations that way). So we need to
c@372 228 // pick the initial phase and buffer fill accordingly.
c@372 229 //
c@372 230 // Example: if the inputSpacing is 2, outputSpacing is 3, and
c@372 231 // filter length is 7,
c@372 232 //
c@372 233 // x x x x a b c ... input samples
c@372 234 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 ...
c@372 235 // i j k l ... output samples
c@372 236 // [--------|--------] <- filter with centre mark
c@372 237 //
c@372 238 // Let h be the index of the centre mark, here 3 (generally
c@372 239 // int(filterLength/2) for odd-length filters).
c@372 240 //
c@372 241 // The smallest n such that h + n * outputSpacing > filterLength
c@372 242 // is 2 (that is, ceil((filterLength - h) / outputSpacing)), and
c@372 243 // (h + 2 * outputSpacing) % inputSpacing == 1, so the initial
c@372 244 // phase is 1.
c@372 245 //
c@372 246 // To achieve our n, we need to pre-fill the "virtual" buffer with
c@372 247 // 4 zero samples: the x's above. This is int((h + n *
c@372 248 // outputSpacing) / inputSpacing). The phase value then ensures the
c@372 249 // buffer is interpreted in such a way that sample a gets an
c@372 250 // effective index of 9 rather than 8 or 10.
c@372 251 //
c@372 252 // This gives us output latency of 2 (== n), i.e. output samples i
c@372 253 // and j will appear before the one in which input sample a is at
c@372 254 // the centre of the filter.
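//
// As a quick check of that example (illustration only), plugging the
// numbers into the formulas used by the code below:
//
// int filterLength = 7, inputSpacing = 2, outputSpacing = 3;
// int h = filterLength / 2; // == 3
// int n = int(ceil(double(filterLength - h) / outputSpacing)); // == 2
// int phase = (h + n * outputSpacing) % inputSpacing; // == 1
// int fill = (h + n * outputSpacing) / inputSpacing; // == 4
// // latency == n == 2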
c@372 255
c@372 256 int h = int(m_filterLength / 2);
c@372 257 int n = int(ceil(double(m_filterLength - h) / outputSpacing));
c@366 258
c@372 259 m_phase = (h + n * outputSpacing) % inputSpacing;
c@372 260
c@372 261 int fill = (h + n * outputSpacing) / inputSpacing;
c@372 262
c@372 263 m_latency = n;
c@372 264
c@372 265 m_buffer = vector<double>(fill, 0);
c@370 266 m_bufferOrigin = 0;
c@366 267
c@366 268 #ifdef DEBUG_RESAMPLER
c@366 269 std::cerr << "initial phase " << m_phase << " (as " << (m_filterLength/2) << " % " << inputSpacing << ")"
c@366 270 << ", latency " << m_latency << std::endl;
c@366 271 #endif
c@362 272 }
c@362 273
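// Compute a single output sample as the inner product of the real
// samples currently buffered (starting at m_bufferOrigin) with the
// unzipped filter for the current phase, then advance m_bufferOrigin
// past the pd.drop samples that have been consumed and move to the
// next phase.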
c@362 274 double
c@366 275 Resampler::reconstructOne()
c@362 276 {
c@362 277 Phase &pd = m_phaseData[m_phase];
c@366 278 double v = 0.0;
c@362 279 int n = pd.filter.size();
c@372 280
c@372 281 assert(n + m_bufferOrigin <= m_buffer.size());
c@372 282
c@370 283 const double *const __restrict__ buf = m_buffer.data() + m_bufferOrigin;
c@370 284 const double *const __restrict__ filt = pd.filter.data();
c@372 285
c@372 286 // std::cerr << "phase = " << m_phase << ", drop = " << pd.drop << ", buffer for reconstruction starts...";
c@372 287 // for (int i = 0; i < 20; ++i) {
c@372 288 // if (i % 5 == 0) std::cerr << "\n" << i << " ";
c@372 289 // std::cerr << buf[i] << " ";
c@372 290 // }
c@372 291 // std::cerr << std::endl;
c@372 292
c@362 293 for (int i = 0; i < n; ++i) {
c@370 294 // NB gcc can only vectorize this with -ffast-math
c@370 295 v += buf[i] * filt[i];
c@362 296 }
c@370 297 m_bufferOrigin += pd.drop;
c@366 298 m_phase = pd.nextPhase;
c@362 299 return v;
c@362 300 }
c@362 301
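// Push model: append the incoming samples to the internal buffer, then
// emit as many output samples as can now be computed, up to
// ceil(n * targetRate / sourceRate). When reducing the rate, the output
// is scaled by targetRate / sourceRate so that the passband gain stays
// (nominally) at unity. Finally, samples that have been consumed are
// discarded from the front of the buffer.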
c@362 302 int
c@366 303 Resampler::process(const double *src, double *dst, int n)
c@362 304 {
c@366 305 for (int i = 0; i < n; ++i) {
c@366 306 m_buffer.push_back(src[i]);
c@362 307 }
c@362 308
c@366 309 int maxout = int(ceil(double(n) * m_targetRate / m_sourceRate));
c@366 310 int outidx = 0;
c@364 311
c@366 312 #ifdef DEBUG_RESAMPLER
c@366 313 std::cerr << "process: buf siz " << m_buffer.size() << " filt siz for phase " << m_phase << " " << m_phaseData[m_phase].filter.size() << std::endl;
c@366 314 #endif
c@366 315
c@367 316 double scaleFactor = 1.0;
c@367 317 if (m_targetRate < m_sourceRate) {
c@367 318 scaleFactor = double(m_targetRate) / double(m_sourceRate);
c@367 319 }
c@367 320
c@366 321 while (outidx < maxout &&
c@370 322 m_buffer.size() >= m_phaseData[m_phase].filter.size() + m_bufferOrigin) {
c@367 323 dst[outidx] = scaleFactor * reconstructOne();
c@366 324 outidx++;
c@364 325 }
c@370 326
c@370 327 m_buffer = vector<double>(m_buffer.begin() + m_bufferOrigin, m_buffer.end());
c@370 328 m_bufferOrigin = 0;
c@366 329
c@366 330 return outidx;
c@362 331 }
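
// A minimal usage sketch of this streaming (push) interface,
// illustration only: names other than the Resampler members are
// hypothetical, and the caller must tolerate getLatency() samples of
// delay before meaningful output appears.
//
// Resampler r(44100, 48000);
// std::vector<double> out(2 * blockSize + 1);
// while (haveMoreInput()) {
//     int got = r.process(nextBlock(), out.data(), blockSize);
//     consumeOutput(out.data(), got); // first getLatency() samples are padding
// }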
c@366 332
c@363 333 std::vector<double>
c@363 334 Resampler::resample(int sourceRate, int targetRate, const double *data, int n)
c@363 335 {
c@363 336 Resampler r(sourceRate, targetRate);
c@363 337
c@363 338 int latency = r.getLatency();
c@363 339
c@368 340 // latency is the output latency. We need to provide enough
c@368 341 // padding samples at the end of the input to guarantee at
c@368 342 // *least* the latency's worth of output samples. That is,
c@368 343
c@368 344 int inputPad = int(ceil(double(latency) * sourceRate / targetRate));
c@368 345
c@368 346 // that means we are providing this much input in total:
c@368 347
c@368 348 int n1 = n + inputPad;
c@368 349
c@368 350 // and obtaining this much output in total:
c@368 351
c@368 352 int m1 = int(ceil(double(n1) * targetRate / sourceRate));
c@368 353
c@368 354 // in order to return this much output to the user:
c@368 355
c@366 356 int m = int(ceil(double(n) * targetRate / sourceRate));
c@368 357
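// For instance, with sourceRate 48000, targetRate 16000, n = 48000 and
// a (hypothetical) latency of 5000: inputPad = 15000, n1 = 63000,
// m1 = 21000 and m = 16000, so after dropping the first latency output
// samples we can return exactly the m samples expected.
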
c@370 358 // std::cerr << "n = " << n << ", sourceRate = " << sourceRate << ", targetRate = " << targetRate << ", m = " << m << ", latency = " << latency << ", m1 = " << m1 << ", n1 = " << n1 << ", n1 - n = " << n1 - n << std::endl;
c@363 359
c@363 360 vector<double> pad(n1 - n, 0.0);
c@368 361 vector<double> out(m1 + 1, 0.0);
c@363 362
c@363 363 int got = r.process(data, out.data(), n);
c@363 364 got += r.process(pad.data(), out.data() + got, pad.size());
c@363 365
c@366 366 #ifdef DEBUG_RESAMPLER
c@366 367 std::cerr << "resample: " << n << " in, " << got << " out" << std::endl;
c@372 368 std::cerr << "first 10 in:" << std::endl;
c@372 369 for (int i = 0; i < 10; ++i) {
c@372 370 std::cerr << data[i] << " ";
c@372 371 if (i == 5) std::cerr << std::endl;
c@366 372 }
c@372 373 std::cerr << std::endl;
c@366 374 #endif
c@366 375
c@368 376 int toReturn = got - latency;
c@368 377 if (toReturn > m) toReturn = m;
c@368 378
c@372 379 vector<double> sliced(out.begin() + latency,
c@368 380 out.begin() + latency + toReturn);
c@372 381
c@372 382 #ifdef DEBUG_RESAMPLER
c@372 383 std::cerr << "all out (after latency compensation), length " << sliced.size() << ":";
c@372 384 for (int i = 0; i < sliced.size(); ++i) {
c@372 385 if (i % 5 == 0) std::cerr << std::endl << i << "... ";
c@372 386 std::cerr << sliced[i] << " ";
c@372 387 }
c@372 388 std::cerr << std::endl;
c@372 389 #endif
c@372 390
c@372 391 return sliced;
c@363 392 }
c@363 393
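
// A minimal usage sketch of the one-shot interface above (illustration
// only; the input-loading function is hypothetical):
//
// std::vector<double> in = loadMonoAudioAt44100Hz();
// std::vector<double> out =
//     Resampler::resample(44100, 48000, in.data(), int(in.size()));
// // out now holds the same signal at 48000 Hz, already compensated
// // for the resampler's latency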