/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    QM DSP Library

    Centre for Digital Music, Queen Mary, University of London.
    This file by Chris Cannam.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version. See the file
    COPYING included with this distribution for more information.
*/

#include "Resampler.h"

#include "maths/MathUtilities.h"
#include "base/KaiserWindow.h"
#include "base/SincWindow.h"
#include "thread/Thread.h"

#include <iostream>
#include <vector>
#include <map>
#include <cassert>

using std::vector;
using std::map;

//#define DEBUG_RESAMPLER 1

Resampler::Resampler(int sourceRate, int targetRate) :
    m_sourceRate(sourceRate),
    m_targetRate(targetRate)
{
    initialise(100, 0.02);
}

Resampler::Resampler(int sourceRate, int targetRate,
                     double snr, double bandwidth) :
    m_sourceRate(sourceRate),
    m_targetRate(targetRate)
{
    initialise(snr, bandwidth);
}

Resampler::~Resampler()
{
    delete[] m_phaseData;
}

// peakToPole -> length -> beta -> window
static map<double, map<int, map<double, vector<double> > > >
knownFilters;

static Mutex
knownFilterMutex;

void
Resampler::initialise(double snr, double bandwidth)
{
    int higher = std::max(m_sourceRate, m_targetRate);
    int lower = std::min(m_sourceRate, m_targetRate);

    m_gcd = MathUtilities::gcd(lower, higher);
    m_peakToPole = higher / m_gcd;

    if (m_targetRate < m_sourceRate) {
        // antialiasing filter, should be slightly below nyquist
        m_peakToPole = m_peakToPole / (1.0 - bandwidth/2.0);
    }

    KaiserWindow::Parameters params =
        KaiserWindow::parametersForBandwidth(snr, bandwidth, higher / m_gcd);

    params.length =
        (params.length % 2 == 0 ? params.length + 1 : params.length);

    params.length =
        (params.length > 200001 ?
         200001 : params.length);

    m_filterLength = params.length;

    vector<double> filter;
    knownFilterMutex.lock();

    if (knownFilters[m_peakToPole][m_filterLength].find(params.beta) ==
        knownFilters[m_peakToPole][m_filterLength].end()) {

        KaiserWindow kw(params);
        SincWindow sw(m_filterLength, m_peakToPole * 2);

        filter = vector<double>(m_filterLength, 0.0);
        for (int i = 0; i < m_filterLength; ++i) filter[i] = 1.0;
        sw.cut(filter.data());
        kw.cut(filter.data());

        knownFilters[m_peakToPole][m_filterLength][params.beta] = filter;
    }

    filter = knownFilters[m_peakToPole][m_filterLength][params.beta];
    knownFilterMutex.unlock();

    int inputSpacing = m_targetRate / m_gcd;
    int outputSpacing = m_sourceRate / m_gcd;

#ifdef DEBUG_RESAMPLER
    std::cerr << "resample " << m_sourceRate << " -> " << m_targetRate
              << ": inputSpacing " << inputSpacing << ", outputSpacing "
              << outputSpacing << ": filter length " << m_filterLength
              << std::endl;
#endif

    // Now we have a filter of (odd) length flen in which the lower
    // sample rate corresponds to every n'th point and the higher rate
    // to every m'th, where n and m are the higher and lower rates
    // divided by their gcd respectively. So if x coordinates are on
    // the same scale as our filter resolution, then source sample i
    // is at i * (targetRate / gcd) and target sample j is at
    // j * (sourceRate / gcd).

    // To reconstruct a single target sample, we want a buffer (real
    // or virtual) of flen values formed of source samples spaced at
    // intervals of (targetRate / gcd), which is 3 in the example
    // below. This is initially formed with the first sample at the
    // filter peak.
    //
    //  0  0  0  0  a  0  0  b  0
    //
    // and of course we have our filter
    //
    //  f1 f2 f3 f4 f5 f6 f7 f8 f9
    //
    // We take the sum of products of non-zero values from this buffer
    // with corresponding values in the filter
    //
    //  a * f5 + b * f8
    //
    // Then we drop (sourceRate / gcd) values, 4 in this example, from
    // the start of the buffer and fill until it has flen values again
    //
    //  a  0  0  b  0  0  c  0  0
    //
    // repeat to reconstruct the next target sample
    //
    //  a * f1 + b * f4 + c * f7
    //
    // and so on.
    //
    // Above I said the buffer could be "real or virtual" -- ours is
    // virtual. We don't actually store all the zero spacing values,
    // except for padding at the start; normally we store only the
    // values that actually came from the source stream, along with a
    // phase value that tells us how many virtual zeroes there are at
    // the start of the virtual buffer.
    // So the two examples above are
    //
    //  0  a  b  [ with phase 1 ]
    //  a  b  c  [ with phase 0 ]
    //
    // Having thus broken down the buffer so that only the elements we
    // need to multiply are present, we can also unzip the filter into
    // every-nth-element subsets at each phase, allowing us to do the
    // filter multiplication as a simple vector multiply. That is,
    // rather than store
    //
    //  f1 f2 f3 f4 f5 f6 f7 f8 f9
    //
    // we store separately
    //
    //  f1 f4 f7
    //  f2 f5 f8
    //  f3 f6 f9
    //
    // Each time we complete a multiply-and-sum, we need to work out
    // how many (real) samples to drop from the start of our buffer,
    // and how many to add at the end of it for the next multiply. We
    // know we want to drop enough real samples to move along by one
    // computed output sample, which is our outputSpacing number of
    // virtual buffer samples. Depending on the relationship between
    // input and output spacings, this may mean dropping several real
    // samples, one real sample, or none at all (and simply moving to
    // a different "phase").

    m_phaseData = new Phase[inputSpacing];

    for (int phase = 0; phase < inputSpacing; ++phase) {

        Phase p;

        p.nextPhase = phase - outputSpacing;
        while (p.nextPhase < 0) p.nextPhase += inputSpacing;
        p.nextPhase %= inputSpacing;

        p.drop = int(ceil(std::max(0.0, double(outputSpacing - phase))
                          / inputSpacing));

        int filtZipLength = int(ceil(double(m_filterLength - phase)
                                     / inputSpacing));

        for (int i = 0; i < filtZipLength; ++i) {
            p.filter.push_back(filter[i * inputSpacing + phase]);
        }

        m_phaseData[phase] = p;
    }

    // The May implementation of this uses a pull model -- we ask the
    // resampler for a certain number of output samples, and it asks
    // its source stream for as many as it needs to calculate
    // those. This means (among other things) that the source stream
    // can be asked for enough samples up-front to fill the buffer
    // before the first output sample is generated.
    //
    // In this implementation we're using a push model in which a
    // certain number of source samples is provided and we're asked
    // for as many output samples as that makes available. But we
    // can't return any samples from the beginning until half the
    // filter length has been provided as input. This means we must
    // either return a very variable number of samples (none at all
    // until the filter fills, then half the filter length at once) or
    // else have a lengthy declared latency on the output. We do the
    // latter. (What do other implementations do?)
    //
    // We want to make sure the first "real" sample will eventually be
    // aligned with the centre sample in the filter (it's tidier, and
    // easier to do diagnostic calculations that way). So we need to
    // pick the initial phase and buffer fill accordingly.
    //
    // Example: if the inputSpacing is 2, outputSpacing is 3, and
    // filter length is 7,
    //
    //       x     x     x     x     a     b     c ... input samples
    //    0  1  2  3  4  5  6  7  8  9  10 11 12 13 ...
    //             i        j        k ... output samples
    //    [--------|--------] <- filter with centre mark
    //
    // Let h be the index of the centre mark, here 3 (generally
    // int(filterLength/2) for odd-length filters).
    //
    // The smallest n such that h + n * outputSpacing > filterLength
    // is 2 (that is, ceil((filterLength - h) / outputSpacing)), and
    // (h + 2 * outputSpacing) % inputSpacing == 1, so the initial
    // phase is 1.
    //
    // To achieve our n, we need to pre-fill the "virtual" buffer with
    // 4 zero samples: the x's above. This is int((h + n *
    // outputSpacing) / inputSpacing). It's the phase that makes this
    // buffer get dealt with in such a way as to give us an effective
    // index for sample a of 9 rather than 8 or 10 or whatever.
    //
    // This gives us output latency of 2 (== n), i.e. output samples i
    // and j will appear before the one in which input sample a is at
    // the centre of the filter.

    int h = int(m_filterLength / 2);
    int n = ceil(double(m_filterLength - h) / outputSpacing);

    m_phase = (h + n * outputSpacing) % inputSpacing;

    int fill = (h + n * outputSpacing) / inputSpacing;

    m_latency = n;

    m_buffer = vector<double>(fill, 0);
    m_bufferOrigin = 0;

#ifdef DEBUG_RESAMPLER
    std::cerr << "initial phase " << m_phase << " (as " << (m_filterLength/2) << " % " << inputSpacing << ")"
              << ", latency " << m_latency << std::endl;
#endif
}

double
Resampler::reconstructOne()
{
    Phase &pd = m_phaseData[m_phase];
    double v = 0.0;
    int n = pd.filter.size();

    assert(n + m_bufferOrigin <= (int)m_buffer.size());

    const double *const __restrict__ buf = m_buffer.data() + m_bufferOrigin;
    const double *const __restrict__ filt = pd.filter.data();

    // std::cerr << "phase = " << m_phase << ", drop = " << pd.drop << ", buffer for reconstruction starts...";
    // for (int i = 0; i < 20; ++i) {
    //     if (i % 5 == 0) std::cerr << "\n" << i << " ";
    //     std::cerr << buf[i] << " ";
    // }
    // std::cerr << std::endl;

    for (int i = 0; i < n; ++i) {
        // NB gcc can only vectorize this with -ffast-math
        v += buf[i] * filt[i];
    }

    m_bufferOrigin += pd.drop;
    m_phase = pd.nextPhase;
    return v;
}

int
Resampler::process(const double *src, double *dst, int n)
{
    for (int i = 0; i < n; ++i) {
        m_buffer.push_back(src[i]);
    }

    int maxout = int(ceil(double(n) * m_targetRate / m_sourceRate));
    int outidx = 0;

#ifdef DEBUG_RESAMPLER
    std::cerr << "process: buf siz " << m_buffer.size() << " filt siz for phase " << m_phase << " " << m_phaseData[m_phase].filter.size() << std::endl;
#endif
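
    // Gain compensation: the sinc prototype has its zero crossings
    // every m_peakToPole samples, so the taps of any single polyphase
    // branch sum to roughly m_peakToPole / inputSpacing, where
    // inputSpacing == m_targetRate / m_gcd. Scaling each reconstructed
    // sample by the reciprocal of that, (m_targetRate / m_gcd) /
    // m_peakToPole, restores approximately unity gain at DC.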
    double scaleFactor = (double(m_targetRate) / m_gcd) / m_peakToPole;

    while (outidx < maxout &&
           m_buffer.size() >= m_phaseData[m_phase].filter.size() + m_bufferOrigin) {
        dst[outidx] = scaleFactor * reconstructOne();
        outidx++;
    }

    m_buffer = vector<double>(m_buffer.begin() + m_bufferOrigin, m_buffer.end());
    m_bufferOrigin = 0;

    return outidx;
}

std::vector<double>
Resampler::resample(int sourceRate, int targetRate, const double *data, int n)
{
    Resampler r(sourceRate, targetRate);

    int latency = r.getLatency();

    // latency is the output latency. We need to provide enough
    // padding input samples at the end of input to guarantee at
    // *least* the latency's worth of output samples. That is,

    int inputPad = int(ceil((double(latency) * sourceRate) / targetRate));

    // that means we are providing this much input in total:

    int n1 = n + inputPad;

    // and obtaining this much output in total:

    int m1 = int(ceil((double(n1) * targetRate) / sourceRate));

    // in order to return this much output to the user:

    int m = int(ceil((double(n) * targetRate) / sourceRate));

    // std::cerr << "n = " << n << ", sourceRate = " << sourceRate << ", targetRate = " << targetRate << ", m = " << m << ", latency = " << latency << ", inputPad = " << inputPad << ", m1 = " << m1 << ", n1 = " << n1 << ", n1 - n = " << n1 - n << std::endl;

    vector<double> pad(n1 - n, 0.0);
    vector<double> out(m1 + 1, 0.0);

    int got = r.process(data, out.data(), n);
    got += r.process(pad.data(), out.data() + got, pad.size());

#ifdef DEBUG_RESAMPLER
    std::cerr << "resample: " << n << " in, " << got << " out" << std::endl;
    std::cerr << "first 10 in:" << std::endl;
    for (int i = 0; i < 10; ++i) {
        std::cerr << data[i] << " ";
        if (i == 5) std::cerr << std::endl;
    }
    std::cerr << std::endl;
#endif

    int toReturn = got - latency;
    if (toReturn > m) toReturn = m;

    vector<double> sliced(out.begin() + latency,
                          out.begin() + latency + toReturn);

#ifdef DEBUG_RESAMPLER
    std::cerr << "all out (after latency compensation), length " << sliced.size() << ":";
    for (int i = 0; i < sliced.size(); ++i) {
        if (i % 5 == 0) std::cerr << std::endl << i << "... ";
        std::cerr << sliced[i] << " ";
    }
    std::cerr << std::endl;
#endif

    return sliced;
}
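
// A usage sketch rather than part of the library: it is guarded by the
// hypothetical macro RESAMPLER_EXAMPLE_USAGE (define it only when
// compiling this file standalone as an example). It exercises the
// one-shot static resample() call and the push-model process() loop,
// where the first getLatency() output samples are filter pre-roll that
// the caller is expected to discard or otherwise account for. The
// rates, block size, and test frequency are arbitrary example values.

#ifdef RESAMPLER_EXAMPLE_USAGE

#include <cmath>

int main()
{
    const double pi = 3.14159265358979323846;
    const int sourceRate = 8000;
    const int targetRate = 44100;

    // One second of a 440 Hz sine at the source rate.
    std::vector<double> in(sourceRate);
    for (int i = 0; i < (int)in.size(); ++i) {
        in[i] = sin(2.0 * pi * 440.0 * i / sourceRate);
    }

    // One-shot conversion: end padding and latency compensation are
    // handled internally, so the output aligns with the input.
    std::vector<double> out =
        Resampler::resample(sourceRate, targetRate, in.data(), (int)in.size());
    std::cout << "one-shot: " << in.size() << " in, "
              << out.size() << " out" << std::endl;

    // Streaming conversion: push fixed-size blocks. Each call returns
    // however many output samples that block makes available, which is
    // at most ceil(blockSize * targetRate / sourceRate).
    Resampler r(sourceRate, targetRate);
    const int blockSize = 1024;
    const int maxOut =
        int(ceil(double(blockSize) * targetRate / sourceRate)) + 1;
    std::vector<double> outBlock(maxOut, 0.0);

    int produced = 0;
    for (int off = 0; off + blockSize <= (int)in.size(); off += blockSize) {
        produced += r.process(in.data() + off, outBlock.data(), blockSize);
    }
    std::cout << "streaming: " << produced << " out so far, latency "
              << r.getLatency() << std::endl;

    return 0;
}

#endif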