matthiasm@0: /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */ Chris@9: matthiasm@0: /* Chris@9: pYIN - A fundamental frequency estimator for monophonic audio Chris@9: Centre for Digital Music, Queen Mary, University of London. Chris@9: Chris@9: This program is free software; you can redistribute it and/or Chris@9: modify it under the terms of the GNU General Public License as Chris@9: published by the Free Software Foundation; either version 2 of the Chris@9: License, or (at your option) any later version. See the file Chris@9: COPYING included with this distribution for more information. matthiasm@0: */ matthiasm@0: matthiasm@36: #include "PYinVamp.h" matthiasm@0: #include "MonoNote.h" mail@131: #include "MonoPitchHMM.h" matthiasm@0: matthiasm@0: #include matthiasm@0: #include matthiasm@0: matthiasm@0: #include matthiasm@0: #include matthiasm@0: #include matthiasm@0: matthiasm@0: using std::string; matthiasm@0: using std::vector; matthiasm@0: using Vamp::RealTime; matthiasm@0: matthiasm@0: matthiasm@36: PYinVamp::PYinVamp(float inputSampleRate) : matthiasm@0: Plugin(inputSampleRate), matthiasm@0: m_channels(0), matthiasm@0: m_stepSize(256), matthiasm@0: m_blockSize(2048), matthiasm@0: m_fmin(40), matthiasm@58: m_fmax(1600), matthiasm@0: m_yin(2048, inputSampleRate, 0.0), matthiasm@0: m_oF0Candidates(0), matthiasm@0: m_oF0Probs(0), matthiasm@0: m_oVoicedProb(0), matthiasm@0: m_oCandidateSalience(0), matthiasm@0: m_oSmoothedPitchTrack(0), matthiasm@0: m_oNotes(0), matthiasm@0: m_threshDistr(2.0f), mail@132: m_fixedLag(1.0f), matthiasm@6: m_outputUnvoiced(0.0f), matthiasm@70: m_preciseTime(0.0f), matthiasm@117: m_lowAmp(0.1f), matthiasm@117: m_onsetSensitivity(0.7f), matthiasm@117: m_pruneThresh(0.1f), mail@132: m_pitchHmm(0), matthiasm@0: m_pitchProb(0), matthiasm@103: m_timestamp(0), mail@133: m_level(0), mail@133: m_pitchTrack(0) matthiasm@0: { matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::~PYinVamp() matthiasm@0: { matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getIdentifier() const matthiasm@0: { matthiasm@1: return "pyin"; matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getName() const matthiasm@0: { matthiasm@1: return "pYin"; matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getDescription() const matthiasm@0: { matthiasm@0: return "Monophonic pitch and note tracking based on a probabilistic Yin extension."; matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getMaker() const matthiasm@0: { matthiasm@0: return "Matthias Mauch"; matthiasm@0: } matthiasm@0: matthiasm@0: int matthiasm@36: PYinVamp::getPluginVersion() const matthiasm@0: { matthiasm@0: // Increment this each time you release a version that behaves matthiasm@0: // differently from the previous one Chris@143: return 3; matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getCopyright() const matthiasm@0: { matthiasm@0: return "GPL"; matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::InputDomain matthiasm@36: PYinVamp::getInputDomain() const matthiasm@0: { matthiasm@0: return TimeDomain; matthiasm@0: } matthiasm@0: matthiasm@0: size_t matthiasm@36: PYinVamp::getPreferredBlockSize() const matthiasm@0: { matthiasm@0: return 2048; matthiasm@0: } matthiasm@0: matthiasm@0: size_t matthiasm@36: PYinVamp::getPreferredStepSize() const matthiasm@0: { matthiasm@0: return 256; matthiasm@0: } matthiasm@0: matthiasm@0: size_t matthiasm@36: PYinVamp::getMinChannelCount() const matthiasm@0: { matthiasm@0: return 1; matthiasm@0: } matthiasm@0: matthiasm@0: size_t matthiasm@36: PYinVamp::getMaxChannelCount() const matthiasm@0: { matthiasm@0: return 1; matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::ParameterList matthiasm@36: PYinVamp::getParameterDescriptors() const matthiasm@0: { matthiasm@0: ParameterList list; matthiasm@0: matthiasm@0: ParameterDescriptor d; matthiasm@0: matthiasm@0: d.identifier = "threshdistr"; matthiasm@0: d.name = "Yin threshold distribution"; matthiasm@0: d.description = "."; matthiasm@0: d.unit = ""; matthiasm@0: d.minValue = 0.0f; matthiasm@0: d.maxValue = 7.0f; matthiasm@0: d.defaultValue = 2.0f; matthiasm@0: d.isQuantized = true; matthiasm@0: d.quantizeStep = 1.0f; matthiasm@0: d.valueNames.push_back("Uniform"); matthiasm@0: d.valueNames.push_back("Beta (mean 0.10)"); matthiasm@0: d.valueNames.push_back("Beta (mean 0.15)"); matthiasm@0: d.valueNames.push_back("Beta (mean 0.20)"); matthiasm@0: d.valueNames.push_back("Beta (mean 0.30)"); matthiasm@0: d.valueNames.push_back("Single Value 0.10"); matthiasm@0: d.valueNames.push_back("Single Value 0.15"); matthiasm@0: d.valueNames.push_back("Single Value 0.20"); matthiasm@0: list.push_back(d); matthiasm@0: mail@130: d.valueNames.clear(); mail@130: mail@130: d.identifier = "fixedlag"; mail@130: d.name = "Fixed-lag smoothing"; mail@130: d.description = "Use fixed lag smoothing, not full Viterbi smoothing."; mail@130: d.unit = ""; mail@130: d.minValue = 0.0f; mail@130: d.maxValue = 1.0f; Chris@148: d.defaultValue = 1.0f; mail@130: d.isQuantized = true; mail@130: d.quantizeStep = 1.0f; mail@130: list.push_back(d); mail@130: matthiasm@0: d.identifier = "outputunvoiced"; matthiasm@0: d.valueNames.clear(); matthiasm@0: d.name = "Output estimates classified as unvoiced?"; matthiasm@0: d.description = "."; matthiasm@0: d.unit = ""; matthiasm@0: d.minValue = 0.0f; matthiasm@0: d.maxValue = 2.0f; matthiasm@6: d.defaultValue = 0.0f; matthiasm@0: d.isQuantized = true; matthiasm@0: d.quantizeStep = 1.0f; matthiasm@0: d.valueNames.push_back("No"); matthiasm@0: d.valueNames.push_back("Yes"); matthiasm@0: d.valueNames.push_back("Yes, as negative frequencies"); matthiasm@0: list.push_back(d); matthiasm@0: matthiasm@70: d.identifier = "precisetime"; matthiasm@70: d.valueNames.clear(); matthiasm@70: d.name = "Use non-standard precise YIN timing (slow)."; matthiasm@70: d.description = "."; matthiasm@70: d.unit = ""; matthiasm@70: d.minValue = 0.0f; matthiasm@70: d.maxValue = 1.0f; matthiasm@70: d.defaultValue = 0.0f; matthiasm@70: d.isQuantized = true; matthiasm@70: d.quantizeStep = 1.0f; matthiasm@70: list.push_back(d); matthiasm@70: matthiasm@72: d.identifier = "lowampsuppression"; matthiasm@72: d.valueNames.clear(); matthiasm@72: d.name = "Suppress low amplitude pitch estimates."; matthiasm@72: d.description = "."; matthiasm@72: d.unit = ""; matthiasm@72: d.minValue = 0.0f; matthiasm@72: d.maxValue = 1.0f; matthiasm@73: d.defaultValue = 0.1f; matthiasm@72: d.isQuantized = false; matthiasm@72: list.push_back(d); matthiasm@70: matthiasm@107: d.identifier = "onsetsensitivity"; matthiasm@107: d.valueNames.clear(); matthiasm@107: d.name = "Onset sensitivity"; matthiasm@107: d.description = "Adds additional note onsets when RMS increases."; matthiasm@107: d.unit = ""; matthiasm@107: d.minValue = 0.0f; matthiasm@107: d.maxValue = 1.0f; matthiasm@117: d.defaultValue = 0.7f; matthiasm@108: d.isQuantized = false; matthiasm@108: list.push_back(d); matthiasm@108: matthiasm@108: d.identifier = "prunethresh"; matthiasm@108: d.valueNames.clear(); matthiasm@108: d.name = "Duration pruning threshold."; matthiasm@108: d.description = "Prune notes that are shorter than this value."; matthiasm@108: d.unit = ""; matthiasm@108: d.minValue = 0.0f; matthiasm@108: d.maxValue = 0.2f; matthiasm@117: d.defaultValue = 0.1f; matthiasm@107: d.isQuantized = false; matthiasm@107: list.push_back(d); matthiasm@107: matthiasm@0: return list; matthiasm@0: } matthiasm@0: matthiasm@0: float matthiasm@36: PYinVamp::getParameter(string identifier) const matthiasm@0: { matthiasm@0: if (identifier == "threshdistr") { matthiasm@0: return m_threshDistr; matthiasm@0: } mail@130: if (identifier == "fixedlag") { mail@130: return m_fixedLag; mail@130: } matthiasm@0: if (identifier == "outputunvoiced") { matthiasm@0: return m_outputUnvoiced; matthiasm@0: } matthiasm@70: if (identifier == "precisetime") { matthiasm@70: return m_preciseTime; matthiasm@70: } matthiasm@72: if (identifier == "lowampsuppression") { matthiasm@72: return m_lowAmp; matthiasm@72: } matthiasm@107: if (identifier == "onsetsensitivity") { matthiasm@107: return m_onsetSensitivity; matthiasm@107: } matthiasm@108: if (identifier == "prunethresh") { matthiasm@108: return m_pruneThresh; matthiasm@108: } matthiasm@0: return 0.f; matthiasm@0: } matthiasm@0: matthiasm@0: void matthiasm@36: PYinVamp::setParameter(string identifier, float value) matthiasm@0: { matthiasm@0: if (identifier == "threshdistr") matthiasm@0: { matthiasm@0: m_threshDistr = value; matthiasm@0: } mail@130: if (identifier == "fixedlag") mail@130: { mail@130: m_fixedLag = value; mail@130: } matthiasm@0: if (identifier == "outputunvoiced") matthiasm@0: { matthiasm@0: m_outputUnvoiced = value; matthiasm@0: } matthiasm@70: if (identifier == "precisetime") matthiasm@70: { matthiasm@70: m_preciseTime = value; matthiasm@70: } matthiasm@72: if (identifier == "lowampsuppression") matthiasm@72: { matthiasm@72: m_lowAmp = value; matthiasm@72: } matthiasm@107: if (identifier == "onsetsensitivity") matthiasm@107: { matthiasm@107: m_onsetSensitivity = value; matthiasm@107: } matthiasm@108: if (identifier == "prunethresh") matthiasm@108: { matthiasm@108: m_pruneThresh = value; matthiasm@108: } matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::ProgramList matthiasm@36: PYinVamp::getPrograms() const matthiasm@0: { matthiasm@0: ProgramList list; matthiasm@0: return list; matthiasm@0: } matthiasm@0: matthiasm@0: string matthiasm@36: PYinVamp::getCurrentProgram() const matthiasm@0: { matthiasm@0: return ""; // no programs matthiasm@0: } matthiasm@0: matthiasm@0: void Chris@138: PYinVamp::selectProgram(string) matthiasm@0: { matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::OutputList matthiasm@36: PYinVamp::getOutputDescriptors() const matthiasm@0: { matthiasm@0: OutputList outputs; matthiasm@0: matthiasm@0: OutputDescriptor d; matthiasm@0: matthiasm@0: int outputNumber = 0; matthiasm@0: matthiasm@0: d.identifier = "f0candidates"; matthiasm@0: d.name = "F0 Candidates"; matthiasm@0: d.description = "Estimated fundamental frequency candidates."; matthiasm@0: d.unit = "Hz"; matthiasm@0: d.hasFixedBinCount = false; matthiasm@0: d.hasKnownExtents = true; matthiasm@0: d.minValue = m_fmin; matthiasm@0: d.maxValue = 500; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::FixedSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = false; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oF0Candidates = outputNumber++; matthiasm@0: matthiasm@0: d.identifier = "f0probs"; matthiasm@0: d.name = "Candidate Probabilities"; Chris@146: d.description = "Probabilities of estimated fundamental frequency candidates."; matthiasm@0: d.unit = ""; matthiasm@0: d.hasFixedBinCount = false; matthiasm@0: d.hasKnownExtents = true; matthiasm@0: d.minValue = 0; matthiasm@0: d.maxValue = 1; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::FixedSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = false; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oF0Probs = outputNumber++; matthiasm@0: matthiasm@0: d.identifier = "voicedprob"; matthiasm@0: d.name = "Voiced Probability"; matthiasm@0: d.description = "Probability that the signal is voiced according to Probabilistic Yin."; matthiasm@0: d.unit = ""; matthiasm@0: d.hasFixedBinCount = true; matthiasm@0: d.binCount = 1; matthiasm@0: d.hasKnownExtents = true; matthiasm@0: d.minValue = 0; matthiasm@0: d.maxValue = 1; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::FixedSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = false; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oVoicedProb = outputNumber++; matthiasm@0: matthiasm@0: d.identifier = "candidatesalience"; matthiasm@0: d.name = "Candidate Salience"; matthiasm@0: d.description = "Candidate Salience"; matthiasm@0: d.hasFixedBinCount = true; matthiasm@0: d.binCount = m_blockSize / 2; matthiasm@0: d.hasKnownExtents = true; matthiasm@0: d.minValue = 0; matthiasm@0: d.maxValue = 1; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::FixedSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = false; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oCandidateSalience = outputNumber++; matthiasm@0: matthiasm@0: d.identifier = "smoothedpitchtrack"; matthiasm@0: d.name = "Smoothed Pitch Track"; Chris@146: d.description = "Frame-by-frame pitch estimate after smoothing"; matthiasm@0: d.unit = "Hz"; matthiasm@0: d.hasFixedBinCount = true; matthiasm@0: d.binCount = 1; matthiasm@0: d.hasKnownExtents = false; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::FixedSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = false; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oSmoothedPitchTrack = outputNumber++; matthiasm@0: matthiasm@0: d.identifier = "notes"; matthiasm@0: d.name = "Notes"; matthiasm@0: d.description = "Derived fixed-pitch note frequencies"; matthiasm@0: d.unit = "Hz"; matthiasm@0: d.hasFixedBinCount = true; matthiasm@0: d.binCount = 1; matthiasm@0: d.hasKnownExtents = false; matthiasm@0: d.isQuantized = false; matthiasm@0: d.sampleType = OutputDescriptor::VariableSampleRate; matthiasm@0: d.sampleRate = (m_inputSampleRate / m_stepSize); matthiasm@0: d.hasDuration = true; matthiasm@0: outputs.push_back(d); matthiasm@0: m_oNotes = outputNumber++; matthiasm@0: matthiasm@0: return outputs; matthiasm@0: } matthiasm@0: matthiasm@0: bool matthiasm@36: PYinVamp::initialise(size_t channels, size_t stepSize, size_t blockSize) matthiasm@0: { matthiasm@0: if (channels < getMinChannelCount() || matthiasm@0: channels > getMaxChannelCount()) return false; matthiasm@0: matthiasm@0: m_channels = channels; matthiasm@0: m_stepSize = stepSize; matthiasm@0: m_blockSize = blockSize; matthiasm@0: matthiasm@0: reset(); matthiasm@0: matthiasm@0: return true; matthiasm@0: } matthiasm@0: matthiasm@0: void matthiasm@36: PYinVamp::reset() matthiasm@0: { matthiasm@0: m_yin.setThresholdDistr(m_threshDistr); matthiasm@0: m_yin.setFrameSize(m_blockSize); matthiasm@117: m_yin.setFast(!m_preciseTime); mail@132: Chris@150: if (m_fixedLag > 0.5f) m_pitchHmm = MonoPitchHMM(100); mail@132: else m_pitchHmm = MonoPitchHMM(0); matthiasm@0: matthiasm@0: m_pitchProb.clear(); matthiasm@0: m_timestamp.clear(); matthiasm@103: m_level.clear(); mail@133: m_pitchTrack.clear(); matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::FeatureSet matthiasm@36: PYinVamp::process(const float *const *inputBuffers, RealTime timestamp) matthiasm@0: { matthiasm@77: int offset = m_preciseTime == 1.0 ? m_blockSize/2 : m_blockSize/4; mail@133: timestamp = timestamp + Vamp::RealTime::frame2RealTime(offset, mail@133: lrintf(m_inputSampleRate)); matthiasm@77: matthiasm@0: FeatureSet fs; matthiasm@0: matthiasm@46: float rms = 0; matthiasm@46: matthiasm@0: double *dInputBuffers = new double[m_blockSize]; matthiasm@46: for (size_t i = 0; i < m_blockSize; ++i) { matthiasm@46: dInputBuffers[i] = inputBuffers[0][i]; matthiasm@46: rms += inputBuffers[0][i] * inputBuffers[0][i]; matthiasm@46: } matthiasm@46: rms /= m_blockSize; matthiasm@46: rms = sqrt(rms); matthiasm@116: matthiasm@72: bool isLowAmplitude = (rms < m_lowAmp); matthiasm@0: matthiasm@0: Yin::YinOutput yo = m_yin.processProbabilisticYin(dInputBuffers); matthiasm@27: delete [] dInputBuffers; matthiasm@27: matthiasm@103: m_level.push_back(yo.rms); matthiasm@103: matthiasm@27: vector > tempPitchProb; matthiasm@27: for (size_t iCandidate = 0; iCandidate < yo.freqProb.size(); ++iCandidate) matthiasm@27: { matthiasm@27: double tempPitch = 12 * std::log(yo.freqProb[iCandidate].first/440)/std::log(2.) + 69; matthiasm@50: if (!isLowAmplitude) matthiasm@116: { matthiasm@46: tempPitchProb.push_back(pair matthiasm@46: (tempPitch, yo.freqProb[iCandidate].second)); matthiasm@116: } else { matthiasm@116: float factor = ((rms+0.01*m_lowAmp)/(1.01*m_lowAmp)); matthiasm@46: tempPitchProb.push_back(pair matthiasm@65: (tempPitch, yo.freqProb[iCandidate].second*factor)); matthiasm@65: } matthiasm@27: } mail@130: mail@132: vector tempObsProb = m_pitchHmm.calculateObsProb(tempPitchProb); mail@132: if (m_timestamp.empty()) mail@130: { mail@132: m_pitchHmm.initialise(tempObsProb); mail@132: } else { mail@132: m_pitchHmm.process(tempObsProb); mail@132: } mail@132: matthiasm@27: m_pitchProb.push_back(tempPitchProb); matthiasm@27: m_timestamp.push_back(timestamp); matthiasm@27: mail@132: int lag = m_pitchHmm.m_fixedLag; mail@132: Chris@150: if (m_fixedLag > 0.5f) // do fixed-lag smoothing instead of full Viterbi mail@132: { Chris@141: if (int(m_timestamp.size()) == lag + 1) mail@131: { mail@132: m_timestamp.pop_front(); mail@132: m_pitchProb.pop_front(); mail@132: mail@132: Feature f; mail@132: f.hasTimestamp = true; mail@132: vector rawPitchPath = m_pitchHmm.track(); mail@132: float freq = m_pitchHmm.nearestFreq(rawPitchPath[0], mail@132: m_pitchProb[0]); mail@133: m_pitchTrack.push_back(freq); mail@132: f.timestamp = m_timestamp[0]; mail@132: f.values.clear(); mail@132: mail@132: // different output modes mail@132: if (freq < 0 && (m_outputUnvoiced==0)) mail@132: { mail@132: mail@132: } else { mail@132: if (m_outputUnvoiced == 1) mail@132: { mail@132: f.values.push_back(fabs(freq)); mail@132: } else { mail@132: f.values.push_back(freq); mail@132: } mail@132: fs[m_oSmoothedPitchTrack].push_back(f); mail@132: } mail@131: } mail@130: } mail@132: matthiasm@27: // F0 CANDIDATES matthiasm@0: Feature f; matthiasm@0: f.hasTimestamp = true; matthiasm@0: f.timestamp = timestamp; matthiasm@0: for (size_t i = 0; i < yo.freqProb.size(); ++i) matthiasm@0: { matthiasm@0: f.values.push_back(yo.freqProb[i].first); matthiasm@0: } matthiasm@0: fs[m_oF0Candidates].push_back(f); matthiasm@0: matthiasm@27: // VOICEDPROB matthiasm@0: f.values.clear(); matthiasm@0: float voicedProb = 0; matthiasm@0: for (size_t i = 0; i < yo.freqProb.size(); ++i) matthiasm@0: { matthiasm@0: f.values.push_back(yo.freqProb[i].second); matthiasm@0: voicedProb += yo.freqProb[i].second; matthiasm@0: } matthiasm@0: fs[m_oF0Probs].push_back(f); matthiasm@0: mail@128: f.values.clear(); matthiasm@0: f.values.push_back(voicedProb); matthiasm@0: fs[m_oVoicedProb].push_back(f); matthiasm@0: matthiasm@27: // SALIENCE -- maybe this should eventually disappear matthiasm@0: f.values.clear(); matthiasm@0: float salienceSum = 0; matthiasm@0: for (size_t iBin = 0; iBin < yo.salience.size(); ++iBin) matthiasm@0: { matthiasm@0: f.values.push_back(yo.salience[iBin]); matthiasm@0: salienceSum += yo.salience[iBin]; matthiasm@0: } matthiasm@0: fs[m_oCandidateSalience].push_back(f); matthiasm@0: matthiasm@0: return fs; matthiasm@0: } matthiasm@0: matthiasm@36: PYinVamp::FeatureSet matthiasm@36: PYinVamp::getRemainingFeatures() matthiasm@0: { matthiasm@0: FeatureSet fs; Chris@146: Chris@4: if (m_pitchProb.empty()) { Chris@4: return fs; Chris@4: } Chris@4: Chris@146: Feature f; Chris@146: f.hasTimestamp = true; Chris@146: f.hasDuration = false; Chris@146: mail@131: // ================== P I T C H T R A C K ================================= mail@131: Chris@146: // NB we do this even in fixed-lag mode, as we still have the last Chris@146: // lag's-worth of pitch probs to consume Chris@146: mail@132: vector rawPitchPath = m_pitchHmm.track(); mail@131: mail@131: for (size_t iFrame = 0; iFrame < rawPitchPath.size(); ++iFrame) matthiasm@0: { mail@132: float freq = m_pitchHmm.nearestFreq(rawPitchPath[iFrame], mail@132: m_pitchProb[iFrame]); mail@133: m_pitchTrack.push_back(freq); // for note processing below Chris@146: matthiasm@0: f.timestamp = m_timestamp[iFrame]; matthiasm@0: f.values.clear(); Chris@146: mail@131: // different output modes mail@131: if (freq < 0 && (m_outputUnvoiced==0)) continue; matthiasm@0: if (m_outputUnvoiced == 1) matthiasm@0: { mail@131: f.values.push_back(fabs(freq)); matthiasm@0: } else { mail@131: f.values.push_back(freq); matthiasm@0: } matthiasm@0: fs[m_oSmoothedPitchTrack].push_back(f); matthiasm@0: } Chris@146: Chris@146: addNoteFeatures(fs); Chris@146: Chris@146: return fs; Chris@146: } Chris@146: Chris@146: void Chris@146: PYinVamp::addNoteFeatures(FeatureSet &fs) Chris@146: { matthiasm@1: std::vector > > smoothedPitch; mail@133: for (size_t iFrame = 0; iFrame < m_pitchTrack.size(); ++iFrame) { matthiasm@1: std::vector > temp; mail@133: if (m_pitchTrack[iFrame] > 0) matthiasm@1: { mail@133: double tempPitch = 12 * mail@133: std::log(m_pitchTrack[iFrame]/440)/std::log(2.) + 69; matthiasm@1: temp.push_back(std::pair(tempPitch, .9)); matthiasm@1: } matthiasm@1: smoothedPitch.push_back(temp); matthiasm@1: } mail@133: Chris@150: // In fixed-lag mode, we use fixed-lag processing for the note Chris@150: // transitions here as well as for the pitch transitions in Chris@150: // process. The main reason we provide the fixed-lag option is so Chris@150: // that we can get pitch results incrementally from process; we Chris@150: // don't get that outcome here, but we do benefit from its bounded Chris@150: // memory usage, which can be quite a big deal. So if the caller Chris@150: // asked for it there, we use it here too. (It is a bit slower, Chris@150: // but not much.) Chris@150: Chris@150: MonoNote mn(m_fixedLag > 0.5f); matthiasm@1: vector mnOut = mn.process(smoothedPitch); Chris@146: mail@133: std::cerr << "mnOut size: " << mnOut.size() << std::endl; mail@133: std::cerr << "m_pitchTrack size: " << m_pitchTrack.size() << std::endl; matthiasm@1: matthiasm@6: // turning feature into a note feature Chris@146: Feature f; matthiasm@1: f.hasTimestamp = true; matthiasm@1: f.hasDuration = true; matthiasm@1: f.values.clear(); matthiasm@6: matthiasm@6: int onsetFrame = 0; matthiasm@6: bool isVoiced = 0; matthiasm@6: bool oldIsVoiced = 0; mail@133: size_t nFrame = m_pitchTrack.size(); matthiasm@108: matthiasm@108: float minNoteFrames = (m_inputSampleRate*m_pruneThresh) / m_stepSize; matthiasm@1: mail@133: // the body of the loop below should be in a function/method mail@133: // but what does it actually do?? mail@133: // * takes the result of the note tracking HMM mail@133: // * collects contiguously pitched pitches mail@133: // * writes a note once it notices the voiced segment has ended mail@133: // complications: mail@133: // * it needs a lookahead of two frames for m_level (wtf was I thinking) mail@133: // * it needs to know the timestamp (which can be guessed from the frame no) mail@133: // * mail@133: int offset = m_preciseTime == 1.0 ? m_blockSize/2 : m_blockSize/4; mail@133: RealTime timestampOffset = Vamp::RealTime::frame2RealTime(offset, mail@133: lrintf(m_inputSampleRate)); mail@133: mail@133: std::vector notePitchTrack; // collects pitches for 1 note at a time matthiasm@6: for (size_t iFrame = 0; iFrame < nFrame; ++iFrame) matthiasm@1: { mail@133: isVoiced = mnOut[iFrame].noteState < 3 mail@133: && smoothedPitch[iFrame].size() > 0 mail@133: && (iFrame >= nFrame-2 mail@133: || ((m_level[iFrame]/m_level[iFrame+2]) > m_onsetSensitivity)); matthiasm@6: if (isVoiced && iFrame != nFrame-1) matthiasm@1: { matthiasm@6: if (oldIsVoiced == 0) // beginning of a note matthiasm@1: { matthiasm@6: onsetFrame = iFrame; matthiasm@1: } matthiasm@6: float pitch = smoothedPitch[iFrame][0].first; matthiasm@6: notePitchTrack.push_back(pitch); // add to the note's pitch track matthiasm@6: } else { // not currently voiced matthiasm@108: if (oldIsVoiced == 1) // end of note matthiasm@6: { matthiasm@108: if (notePitchTrack.size() >= minNoteFrames) matthiasm@108: { matthiasm@108: std::sort(notePitchTrack.begin(), notePitchTrack.end()); matthiasm@108: float medianPitch = notePitchTrack[notePitchTrack.size()/2]; mail@133: float medianFreq = mail@133: std::pow(2,(medianPitch - 69) / 12) * 440; matthiasm@108: f.values.clear(); matthiasm@108: f.values.push_back(medianFreq); mail@133: RealTime start = RealTime::frame2RealTime( mail@133: onsetFrame * m_stepSize, lrintf(m_inputSampleRate)) + mail@133: timestampOffset; mail@133: RealTime end = RealTime::frame2RealTime( mail@133: iFrame * m_stepSize, lrintf(m_inputSampleRate)) + mail@133: timestampOffset; mail@133: f.timestamp = start; mail@133: f.duration = end - start; matthiasm@108: fs[m_oNotes].push_back(f); matthiasm@108: } matthiasm@108: notePitchTrack.clear(); matthiasm@1: } matthiasm@1: } matthiasm@6: oldIsVoiced = isVoiced; matthiasm@1: } matthiasm@0: }