changeset 629:10046d544e76
Further work on #1773 "Loading a session with features extracted from multiplexed inputs". Re-read the aggregate wave models from the session file; also re-order the way they are written so as to improve the likelihood of successfully re-reading them (! - as it stood before, there was some chance involved)
| author | Chris Cannam |
|---|---|
| date | Mon, 15 Oct 2018 15:50:39 +0100 |
| parents | b936872faff2 |
| children | ca64cc9047d0 |
| files | framework/Document.cpp framework/SVFileReader.cpp framework/SVFileReader.h |
| diffstat | 3 files changed, 167 insertions(+), 49 deletions(-) |
--- a/framework/Document.cpp Wed Oct 10 08:44:37 2018 +0100
+++ b/framework/Document.cpp Mon Oct 15 15:50:39 2018 +0100
@@ -1305,6 +1305,15 @@
     // Write aggregate models first, so that when re-reading
     // derivations we already know about their existence. But only
     // those that are actually used
+    //
+    // Later note: This turns out not to be a great idea - we can't
+    // use an aggregate model to drive a derivation unless its
+    // component models have all also already been loaded. So we
+    // really should have written non-aggregate read-only
+    // (i.e. non-derived) wave-type models first, then aggregate
+    // models, then models that have derivations. But we didn't do
+    // that, so existing sessions will always have the aggregate
+    // models first and we might as well stick with that.
 
     for (std::set<Model *>::iterator i = m_aggregateModels.begin();
          i != m_aggregateModels.end(); ++i) {
@@ -1325,64 +1334,86 @@
     std::set<Model *> written;
 
-    for (ModelMap::const_iterator i = m_models.begin();
-         i != m_models.end(); ++i) {
+    // Now write the other models in two passes: first the models that
+    // aren't derived from anything (in case they are source
+    // components for an aggregate model, in which case we need to
+    // have seen them before we see any models derived from aggregates
+    // that use them - see the lament above) and then the models that
+    // have derivations.
 
-        Model *model = i->first;
-        const ModelRecord &rec = i->second;
+    const int nonDerivedPass = 0, derivedPass = 1;
+    for (int pass = nonDerivedPass; pass <= derivedPass; ++pass) {
+
+        for (ModelMap::const_iterator i = m_models.begin();
+             i != m_models.end(); ++i) {
 
-        if (used.find(model) == used.end()) continue;
+            Model *model = i->first;
+            const ModelRecord &rec = i->second;
+
+            if (used.find(model) == used.end()) continue;
 
-        // We need an intelligent way to determine which models need
-        // to be streamed (i.e. have been edited, or are small) and
-        // which should not be (i.e. remain as generated by a
-        // transform, and are large).
-        //
-        // At the moment we can get away with deciding not to stream
-        // dense 3d models or writable wave file models, provided they
-        // were generated from a transform, because at the moment there
-        // is no way to edit those model types so it should be safe to
-        // regenerate them. That won't always work in future though.
-        // It would be particularly nice to be able to ask the user,
-        // as well as making an intelligent guess.
+            // We need an intelligent way to determine which models
+            // need to be streamed (i.e. have been edited, or are
+            // small) and which should not be (i.e. remain as
+            // generated by a transform, and are large).
+            //
+            // At the moment we can get away with deciding not to
+            // stream dense 3d models or writable wave file models,
+            // provided they were generated from a transform, because
+            // at the moment there is no way to edit those model types
+            // so it should be safe to regenerate them. That won't
+            // always work in future though. It would be particularly
+            // nice to be able to ask the user, as well as making an
+            // intelligent guess.
 
-        bool writeModel = true;
-        bool haveDerivation = false;
+            bool writeModel = true;
+            bool haveDerivation = false;
+
+            if (rec.source && rec.transform.getIdentifier() != "") {
+                haveDerivation = true;
+            }
 
-        if (rec.source && rec.transform.getIdentifier() != "") {
-            haveDerivation = true;
-        }
+            if (pass == nonDerivedPass) {
+                if (haveDerivation) {
+                    SVDEBUG << "skipping derived model " << model->objectName() << " during nonDerivedPass" << endl;
+                    continue;
+                }
+            } else {
+                if (!haveDerivation) {
+                    SVDEBUG << "skipping non-derived model " << model->objectName() << " during derivedPass" << endl;
+                    continue;
+                }
+            }
 
-        if (haveDerivation) {
-            if (dynamic_cast<const WritableWaveFileModel *>(model)) {
-                writeModel = false;
-            } else if (dynamic_cast<const DenseThreeDimensionalModel *>(model)) {
-                writeModel = false;
+            if (haveDerivation) {
+                if (dynamic_cast<const WritableWaveFileModel *>(model)) {
+                    writeModel = false;
+                } else if (dynamic_cast<const DenseThreeDimensionalModel *>(model)) {
+                    writeModel = false;
+                }
+            }
+
+            if (writeModel) {
+                model->toXml(out, indent + " ");
+                written.insert(model);
+            }
+
+            if (haveDerivation) {
+                writeBackwardCompatibleDerivation(out, indent + " ",
+                                                  model, rec);
+            }
+
+            //!!! We should probably own the PlayParameterRepository
+            PlayParameters *playParameters =
+                PlayParameterRepository::getInstance()->getPlayParameters(model);
+            if (playParameters) {
+                playParameters->toXml
+                    (out, indent + " ",
+                     QString("model=\"%1\"")
+                     .arg(XmlExportable::getObjectExportId(model)));
+            }
         }
-
-        if (writeModel) {
-            model->toXml(out, indent + " ");
-            written.insert(model);
-        }
-
-        if (haveDerivation) {
-            writeBackwardCompatibleDerivation(out, indent + " ",
-                                              model, rec);
-        }
-
-        //!!! We should probably own the PlayParameterRepository
-        PlayParameters *playParameters =
-            PlayParameterRepository::getInstance()->getPlayParameters(model);
-        if (playParameters) {
-            playParameters->toXml
-                (out, indent + " ",
-                 QString("model=\"%1\"")
-                 .arg(XmlExportable::getObjectExportId(model)));
-        }
     }
-
-    //!!!
 
     // We should write out the alignment models here. AlignmentModel
     // needs a toXml that writes out the export IDs of its reference
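The comments in the Document.cpp hunk above describe the intended ordering as a two-pass write: models with no derivation first, then models that have derivations. Below is a minimal standalone sketch of that idea; `ModelInfo` and `writeModelXml` are invented stand-ins for the real Model/ModelRecord machinery, not SV classes.

```cpp
// Minimal sketch of the two-pass write order used in Document.cpp above.
// ModelInfo and writeModelXml are invented stand-ins, not SV classes.
#include <iostream>
#include <string>
#include <vector>

struct ModelInfo {
    std::string name;
    bool derived;   // true if the model was generated by a transform
};

static void writeModelXml(const ModelInfo &m) {
    std::cout << "<model name=\"" << m.name << "\"/>\n";
}

int main() {
    std::vector<ModelInfo> models {
        { "derived-notes", true },
        { "main-wave", false },
        { "second-wave", false },
    };

    // Pass 0 writes models with no derivation, so that anything built
    // from them (such as an aggregate) can be resolved before derived
    // models are read back; pass 1 writes the derived models.
    const int nonDerivedPass = 0, derivedPass = 1;
    for (int pass = nonDerivedPass; pass <= derivedPass; ++pass) {
        for (const auto &m : models) {
            bool isDerivedPass = (pass == derivedPass);
            if (m.derived != isDerivedPass) continue;  // wrong pass: skip
            writeModelXml(m);
        }
    }
    return 0;
}
```

Running it prints the two non-derived models before the derived one, which is the property the session reader relies on when it later resolves aggregates and derivations.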
--- a/framework/SVFileReader.cpp Wed Oct 10 08:44:37 2018 +0100
+++ b/framework/SVFileReader.cpp Mon Oct 15 15:50:39 2018 +0100
@@ -36,6 +36,7 @@
 #include "data/model/TextModel.h"
 #include "data/model/ImageModel.h"
 #include "data/model/AlignmentModel.h"
+#include "data/model/AggregateWaveModel.h"
 
 #include "transform/TransformFactory.h"
@@ -209,6 +210,7 @@
 
         } else if (name == "derivation") {
 
+            makeAggregateModels(); // must be done before derivations that use them
             ok = readDerivation(attributes);
 
         } else if (name == "playparameters") {
@@ -403,8 +405,60 @@
 }
 
 void
+SVFileReader::makeAggregateModels()
+{
+    std::map<int, PendingAggregateRec> stillPending;
+
+    for (auto p: m_pendingAggregates) {
+
+        int id = p.first;
+        const PendingAggregateRec &rec = p.second;
+        bool skip = false;
+
+        AggregateWaveModel::ChannelSpecList specs;
+        for (int componentId: rec.components) {
+            bool found = false;
+            if (m_models.find(componentId) != m_models.end()) {
+                RangeSummarisableTimeValueModel *rs =
+                    dynamic_cast<RangeSummarisableTimeValueModel *>
+                    (m_models[componentId]);
+                if (rs) {
+                    specs.push_back(AggregateWaveModel::ModelChannelSpec
+                                    (rs, -1));
+                    found = true;
+                }
+            }
+            if (!found) {
+                SVDEBUG << "SVFileReader::makeAggregateModels:"
+                        << "Unknown component model id "
+                        << componentId << " in aggregate model id " << id
+                        << ", hoping we won't be needing it just yet"
+                        << endl;
+                skip = true;
+            }
+        }
+
+        if (skip) {
+            stillPending[id] = rec;
+        } else {
+            AggregateWaveModel *model = new AggregateWaveModel(specs);
+            model->setObjectName(rec.name);
+
+            SVDEBUG << "SVFileReader::makeAggregateModels: created aggregate model id "
+                    << id << " with " << specs.size() << " components" << endl;
+
+            m_models[id] = model;
+        }
+    }
+
+    m_pendingAggregates = stillPending;
+}
+
+void
 SVFileReader::addUnaddedModels()
 {
+    makeAggregateModels();
+
     std::set<Model *> unaddedModels;
 
     for (std::map<int, Model *>::iterator i = m_models.begin();
@@ -510,6 +564,30 @@
 
         return true;
 
+    } else if (type == "aggregatewave") {
+
+        QString components = attributes.value("components");
+        QStringList componentIdStrings = components.split(",");
+        std::vector<int> componentIds;
+        for (auto cid: componentIdStrings) {
+            bool ok = false;
+            int id = cid.toInt(&ok);
+            if (!ok) {
+                SVCERR << "SVFileReader::readModel: Failed to convert component model id from part \"" << cid << "\" in \"" << components << "\"" << endl;
+            } else {
+                componentIds.push_back(id);
+            }
+        }
+        PendingAggregateRec rec { name, sampleRate, componentIds };
+        m_pendingAggregates[id] = rec;
+
+        // The aggregate model will be constructed from its pending
+        // record in makeAggregateModels; it can't happen here because
+        // the component models might not all have been observed yet
+        // (an unfortunate accident of the way the file is written)
+
+        return true;
+
     } else if (type == "dense") {
 
         READ_MANDATORY(int, dimensions, toInt);
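The new `makeAggregateModels` keeps a pending record for any aggregate whose component models have not all been read yet, and retries whenever it is called again (for example just before a derivation is read). Here is a self-contained sketch of that defer-and-retry pattern, using simplified stand-ins for `PendingAggregateRec` and the model map rather than the real SV types.

```cpp
// Sketch of the defer-until-components-exist pattern from makeAggregateModels.
// Pending and the string-valued model map are simplified stand-ins.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Pending {
    std::string name;
    std::vector<int> componentIds;
};

static std::map<int, std::string> models { { 1, "wave-a" } };   // id -> model
static std::map<int, Pending> pendingAggregates {
    { 10, { "both-channels", { 1, 2 } } }   // component 2 not read yet
};

// Build every pending aggregate whose components are all known; keep the
// rest pending so a later call can retry once more models have been read.
static void makeAggregates() {
    std::map<int, Pending> stillPending;
    for (const auto &p : pendingAggregates) {
        bool complete = true;
        for (int cid : p.second.componentIds) {
            if (models.find(cid) == models.end()) {
                std::cout << "component " << cid << " not seen yet\n";
                complete = false;
            }
        }
        if (complete) {
            models[p.first] = "aggregate:" + p.second.name;
        } else {
            stillPending[p.first] = p.second;
        }
    }
    pendingAggregates = stillPending;
}

int main() {
    makeAggregates();          // too early: component 2 is still missing
    models[2] = "wave-b";      // component 2 arrives later in the file
    makeAggregates();          // now the aggregate can be built
    std::cout << models[10] << "\n";   // prints "aggregate:both-channels"
    return 0;
}
```

The second call succeeds only because the missing component has arrived in the meantime, mirroring the "hoping we won't be needing it just yet" path above.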
--- a/framework/SVFileReader.h Wed Oct 10 08:44:37 2018 +0100
+++ b/framework/SVFileReader.h Mon Oct 15 15:50:39 2018 +0100
@@ -229,12 +229,20 @@
     bool readParameter(const QXmlAttributes &);
     bool readSelection(const QXmlAttributes &);
     bool readMeasurement(const QXmlAttributes &);
+
+    void makeAggregateModels();
     void addUnaddedModels();
 
     bool haveModel(int id) {
         return (m_models.find(id) != m_models.end()) && m_models[id];
     }
 
+    struct PendingAggregateRec {
+        QString name;
+        sv_samplerate_t sampleRate;
+        std::vector<int> components;
+    };
+
     Document *m_document;
     SVFileReaderPaneCallback &m_paneCallback;
     QString m_location;
@@ -242,6 +250,7 @@
     std::map<int, Layer *> m_layers;
     std::map<int, Model *> m_models;
     std::set<Model *> m_addedModels;
+    std::map<int, PendingAggregateRec> m_pendingAggregates;
     std::map<int, int> m_awaitingDatasets; // map dataset id -> model id
     Layer *m_currentLayer;
     Model *m_currentDataset;
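The `components` vector declared in `PendingAggregateRec` above is filled by splitting the comma-separated `components` attribute in the `aggregatewave` branch of `readModel`. A rough standard-library equivalent of that parsing, with `std::string` in place of `QString`/`QStringList` and an invented attribute value, might look like this:

```cpp
// Sketch of parsing a comma-separated component-id list, mirroring the
// "aggregatewave" branch of readModel; std::string replaces QString here
// and the attribute value is made up for illustration.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

int main() {
    std::string components = "2,3,oops";   // e.g. components="2,3,oops"
    std::vector<int> componentIds;

    std::stringstream ss(components);
    std::string part;
    while (std::getline(ss, part, ',')) {
        try {
            size_t used = 0;
            int id = std::stoi(part, &used);
            if (used != part.size()) throw std::invalid_argument(part);
            componentIds.push_back(id);
        } catch (const std::exception &) {
            // corresponds to the SVCERR warning: skip unparseable parts
            std::cerr << "failed to convert component model id from part \""
                      << part << "\"\n";
        }
    }

    for (int id : componentIds) {
        std::cout << "component id " << id << "\n";   // prints 2 and 3
    }
    return 0;
}
```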