svapp: changeset 631:146dbfac36a3 (branch: zoom)

Merge from default branch
author | Chris Cannam
---|---
date | Tue, 06 Nov 2018 08:59:08 +0000
parents | 7eafe977a1fc (current diff) ca64cc9047d0 (diff)
children |
files |
diffstat | 3 files changed, 168 insertions(+), 50 deletions(-)
--- a/framework/Document.cpp	Tue Oct 09 15:55:26 2018 +0100
+++ b/framework/Document.cpp	Tue Nov 06 08:59:08 2018 +0000
@@ -1305,6 +1305,15 @@
     // Write aggregate models first, so that when re-reading
     // derivations we already know about their existence. But only
     // those that are actually used
+    //
+    // Later note: This turns out not to be a great idea - we can't
+    // use an aggregate model to drive a derivation unless its
+    // component models have all also already been loaded. So we
+    // really should have written non-aggregate read-only
+    // (i.e. non-derived) wave-type models first, then aggregate
+    // models, then models that have derivations. But we didn't do
+    // that, so existing sessions will always have the aggregate
+    // models first and we might as well stick with that.
 
     for (std::set<Model *>::iterator i = m_aggregateModels.begin();
          i != m_aggregateModels.end(); ++i) {
@@ -1325,64 +1334,86 @@
 
     std::set<Model *> written;
 
-    for (ModelMap::const_iterator i = m_models.begin();
-         i != m_models.end(); ++i) {
+    // Now write the other models in two passes: first the models that
+    // aren't derived from anything (in case they are source
+    // components for an aggregate model, in which case we need to
+    // have seen them before we see any models derived from aggregates
+    // that use them - see the lament above) and then the models that
+    // have derivations.
 
-        Model *model = i->first;
-        const ModelRecord &rec = i->second;
+    const int nonDerivedPass = 0, derivedPass = 1;
+    for (int pass = nonDerivedPass; pass <= derivedPass; ++pass) {
+
+        for (ModelMap::const_iterator i = m_models.begin();
+             i != m_models.end(); ++i) {
 
-        if (used.find(model) == used.end()) continue;
+            Model *model = i->first;
+            const ModelRecord &rec = i->second;
+
+            if (used.find(model) == used.end()) continue;
 
-        // We need an intelligent way to determine which models need
-        // to be streamed (i.e. have been edited, or are small) and
-        // which should not be (i.e. remain as generated by a
-        // transform, and are large).
-        //
-        // At the moment we can get away with deciding not to stream
-        // dense 3d models or writable wave file models, provided they
-        // were generated from a transform, because at the moment there
-        // is no way to edit those model types so it should be safe to
-        // regenerate them. That won't always work in future though.
-        // It would be particularly nice to be able to ask the user,
-        // as well as making an intelligent guess.
+            // We need an intelligent way to determine which models
+            // need to be streamed (i.e. have been edited, or are
+            // small) and which should not be (i.e. remain as
+            // generated by a transform, and are large).
+            //
+            // At the moment we can get away with deciding not to
+            // stream dense 3d models or writable wave file models,
+            // provided they were generated from a transform, because
+            // at the moment there is no way to edit those model types
+            // so it should be safe to regenerate them. That won't
+            // always work in future though. It would be particularly
+            // nice to be able to ask the user, as well as making an
+            // intelligent guess.
 
-        bool writeModel = true;
-        bool haveDerivation = false;
+            bool writeModel = true;
+            bool haveDerivation = false;
+
+            if (rec.source && rec.transform.getIdentifier() != "") {
+                haveDerivation = true;
+            }
 
-        if (rec.source && rec.transform.getIdentifier() != "") {
-            haveDerivation = true;
-        }
+            if (pass == nonDerivedPass) {
+                if (haveDerivation) {
+                    SVDEBUG << "skipping derived model " << model->objectName() << " during nonDerivedPass" << endl;
+                    continue;
+                }
+            } else {
+                if (!haveDerivation) {
+                    SVDEBUG << "skipping non-derived model " << model->objectName() << " during derivedPass" << endl;
+                    continue;
+                }
+            }
 
-        if (haveDerivation) {
-            if (dynamic_cast<const WritableWaveFileModel *>(model)) {
-                writeModel = false;
-            } else if (dynamic_cast<const DenseThreeDimensionalModel *>(model)) {
-                writeModel = false;
+            if (haveDerivation) {
+                if (dynamic_cast<const WritableWaveFileModel *>(model)) {
+                    writeModel = false;
+                } else if (dynamic_cast<const DenseThreeDimensionalModel *>(model)) {
+                    writeModel = false;
+                }
+            }
+
+            if (writeModel) {
+                model->toXml(out, indent + "  ");
+                written.insert(model);
+            }
+
+            if (haveDerivation) {
+                writeBackwardCompatibleDerivation(out, indent + "  ",
+                                                  model, rec);
+            }
+
+            //!!! We should probably own the PlayParameterRepository
+            PlayParameters *playParameters =
+                PlayParameterRepository::getInstance()->getPlayParameters(model);
+            if (playParameters) {
+                playParameters->toXml
+                    (out, indent + "  ",
+                     QString("model=\"%1\"")
+                     .arg(XmlExportable::getObjectExportId(model)));
+            }
         }
-
-        if (writeModel) {
-            model->toXml(out, indent + "  ");
-            written.insert(model);
-        }
-
-        if (haveDerivation) {
-            writeBackwardCompatibleDerivation(out, indent + "  ",
-                                              model, rec);
-        }
-
-        //!!! We should probably own the PlayParameterRepository
-        PlayParameters *playParameters =
-            PlayParameterRepository::getInstance()->getPlayParameters(model);
-        if (playParameters) {
-            playParameters->toXml
-                (out, indent + "  ",
-                 QString("model=\"%1\"")
-                 .arg(XmlExportable::getObjectExportId(model)));
-        }
     }
-
-    //!!!
 
     // We should write out the alignment models here. AlignmentModel
     // needs a toXml that writes out the export IDs of its reference
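For orientation, here is a minimal, self-contained sketch of the two-pass ordering idea this Document.cpp change introduces: models that are not derived from anything are written before derived ones, so that a reader encountering a derivation (possibly driven by an aggregate of earlier models) has already seen all of its inputs. The `Record` type, `isDerived` flag, and `writeInDependencyFriendlyOrder` function are illustrative stand-ins, not the actual Sonic Visualiser types.

```cpp
// Sketch only: two passes over the same collection, emitting source
// (non-derived) records first and derived records second.
#include <iostream>
#include <string>
#include <vector>

struct Record {
    std::string name;
    bool isDerived;   // true if produced by a transform from some source model
};

void writeInDependencyFriendlyOrder(const std::vector<Record> &records)
{
    enum Pass { NonDerived = 0, Derived = 1 };

    for (int pass = NonDerived; pass <= Derived; ++pass) {
        for (const auto &rec : records) {
            // Skip records that belong to the other pass.
            if ((pass == NonDerived) == rec.isDerived) continue;
            std::cout << "writing " << rec.name
                      << (rec.isDerived ? " (derived)" : " (source)") << "\n";
        }
    }
}

int main()
{
    // Sources are emitted first regardless of their position in the list.
    writeInDependencyFriendlyOrder({
        {"wave-1", false}, {"spectrogram-of-aggregate", true}, {"wave-2", false}
    });
}
```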
--- a/framework/SVFileReader.cpp	Tue Oct 09 15:55:26 2018 +0100
+++ b/framework/SVFileReader.cpp	Tue Nov 06 08:59:08 2018 +0000
@@ -36,6 +36,7 @@
 #include "data/model/TextModel.h"
 #include "data/model/ImageModel.h"
 #include "data/model/AlignmentModel.h"
+#include "data/model/AggregateWaveModel.h"
 
 #include "transform/TransformFactory.h"
 
@@ -209,6 +210,7 @@
 
         } else if (name == "derivation") {
 
+            makeAggregateModels(); // must be done before derivations that use them
             ok = readDerivation(attributes);
 
         } else if (name == "playparameters") {
@@ -403,8 +405,60 @@
 }
 
 void
+SVFileReader::makeAggregateModels()
+{
+    std::map<int, PendingAggregateRec> stillPending;
+
+    for (auto p: m_pendingAggregates) {
+
+        int id = p.first;
+        const PendingAggregateRec &rec = p.second;
+        bool skip = false;
+
+        AggregateWaveModel::ChannelSpecList specs;
+        for (int componentId: rec.components) {
+            bool found = false;
+            if (m_models.find(componentId) != m_models.end()) {
+                RangeSummarisableTimeValueModel *rs =
+                    dynamic_cast<RangeSummarisableTimeValueModel *>
+                    (m_models[componentId]);
+                if (rs) {
+                    specs.push_back(AggregateWaveModel::ModelChannelSpec
+                                    (rs, -1));
+                    found = true;
+                }
+            }
+            if (!found) {
+                SVDEBUG << "SVFileReader::makeAggregateModels:"
+                        << "Unknown component model id "
+                        << componentId << " in aggregate model id " << id
+                        << ", hoping we won't be needing it just yet"
+                        << endl;
+                skip = true;
+            }
+        }
+
+        if (skip) {
+            stillPending[id] = rec;
+        } else {
+            AggregateWaveModel *model = new AggregateWaveModel(specs);
+            model->setObjectName(rec.name);
+
+            SVDEBUG << "SVFileReader::makeAggregateModels: created aggregate model id "
+                    << id << " with " << specs.size() << " components" << endl;
+
+            m_models[id] = model;
+        }
+    }
+
+    m_pendingAggregates = stillPending;
+}
+
+void
 SVFileReader::addUnaddedModels()
 {
+    makeAggregateModels();
+
     std::set<Model *> unaddedModels;
 
     for (std::map<int, Model *>::iterator i = m_models.begin();
@@ -510,6 +564,30 @@
 
         return true;
 
+    } else if (type == "aggregatewave") {
+
+        QString components = attributes.value("components");
+        QStringList componentIdStrings = components.split(",");
+        std::vector<int> componentIds;
+        for (auto cid: componentIdStrings) {
+            bool ok = false;
+            int id = cid.toInt(&ok);
+            if (!ok) {
+                SVCERR << "SVFileReader::readModel: Failed to convert component model id from part \"" << cid << "\" in \"" << components << "\"" << endl;
+            } else {
+                componentIds.push_back(id);
+            }
+        }
+        PendingAggregateRec rec { name, sampleRate, componentIds };
+        m_pendingAggregates[id] = rec;
+
+        // The aggregate model will be constructed from its pending
+        // record in makeAggregateModels; it can't happen here because
+        // the component models might not all have been observed yet
+        // (an unfortunate accident of the way the file is written)
+
+        return true;
+
     } else if (type == "dense") {
 
         READ_MANDATORY(int, dimensions, toInt);
@@ -749,7 +827,7 @@
 
     m_currentPane = m_paneCallback.addPane();
 
-    SVCERR << "SVFileReader::addPane: pane is " << m_currentPane << endl;
+    SVDEBUG << "SVFileReader::addPane: pane is " << m_currentPane << endl;
 
     if (!m_currentPane) {
         SVCERR << "WARNING: SV-XML: Internal error: Failed to add pane!"
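The new `makeAggregateModels()` follows a retry-until-resolvable pattern: aggregate definitions whose component models have not all been read yet stay in a pending map and are reconsidered on the next call (before each derivation and again in `addUnaddedModels`). Below is a hedged, simplified sketch of that pattern using stand-in types (`Component`, `PendingAggregate`, `Aggregate`) rather than the real SV model classes.

```cpp
// Sketch only: resolve whatever pending aggregates can be built from the
// components seen so far; keep the rest pending for a later attempt.
#include <map>
#include <string>
#include <vector>

struct Component { std::string name; };

struct PendingAggregate {
    std::string name;
    std::vector<int> componentIds;   // ids of components it needs
};

struct Aggregate {
    std::string name;
    std::vector<Component *> components;
};

void resolvePendingAggregates(std::map<int, Component *> &components,
                              std::map<int, PendingAggregate> &pending,
                              std::map<int, Aggregate> &aggregates)
{
    std::map<int, PendingAggregate> stillPending;

    for (const auto &p : pending) {
        Aggregate agg { p.second.name, {} };
        bool complete = true;

        for (int cid : p.second.componentIds) {
            auto it = components.find(cid);
            if (it == components.end()) {
                complete = false;    // component not seen yet; try again later
                break;
            }
            agg.components.push_back(it->second);
        }

        if (complete) aggregates[p.first] = agg;
        else stillPending[p.first] = p.second;
    }

    pending = stillPending;
}

int main()
{
    Component c1 { "wave-1" }, c2 { "wave-2" };
    std::map<int, Component *> components { {1, &c1} };   // component 2 not read yet
    std::map<int, PendingAggregate> pending { {10, {"agg", {1, 2}}} };
    std::map<int, Aggregate> aggregates;

    resolvePendingAggregates(components, pending, aggregates); // still pending: 2 missing
    components[2] = &c2;                                        // component 2 arrives later
    resolvePendingAggregates(components, pending, aggregates); // now builds aggregate 10
}
```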
--- a/framework/SVFileReader.h	Tue Oct 09 15:55:26 2018 +0100
+++ b/framework/SVFileReader.h	Tue Nov 06 08:59:08 2018 +0000
@@ -229,12 +229,20 @@
     bool readParameter(const QXmlAttributes &);
     bool readSelection(const QXmlAttributes &);
     bool readMeasurement(const QXmlAttributes &);
+
+    void makeAggregateModels();
     void addUnaddedModels();
 
     bool haveModel(int id) {
         return (m_models.find(id) != m_models.end()) && m_models[id];
     }
 
+    struct PendingAggregateRec {
+        QString name;
+        sv_samplerate_t sampleRate;
+        std::vector<int> components;
+    };
+
     Document *m_document;
     SVFileReaderPaneCallback &m_paneCallback;
     QString m_location;
@@ -242,6 +250,7 @@
     std::map<int, Layer *> m_layers;
     std::map<int, Model *> m_models;
     std::set<Model *> m_addedModels;
+    std::map<int, PendingAggregateRec> m_pendingAggregates;
    std::map<int, int> m_awaitingDatasets; // map dataset id -> model id
     Layer *m_currentLayer;
     Model *m_currentDataset;
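The `components` field of the new `PendingAggregateRec` holds the integer ids parsed from the comma-separated `components` attribute handled in `readModel` above (there with QString/QStringList). Purely as an illustration of that parsing step, here is a hedged plain-C++ equivalent; `parseComponentIds` is an invented name for this sketch and is not part of the SV codebase.

```cpp
// Illustrative only: parse a comma-separated id list such as "2,5,9" into
// the kind of std::vector<int> stored in PendingAggregateRec::components,
// skipping any part that is not a valid integer.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<int> parseComponentIds(const std::string &components)
{
    std::vector<int> ids;
    std::stringstream ss(components);
    std::string part;

    while (std::getline(ss, part, ',')) {
        try {
            size_t consumed = 0;
            int id = std::stoi(part, &consumed);
            if (consumed != part.size()) throw std::invalid_argument(part);
            ids.push_back(id);
        } catch (const std::exception &) {
            std::cerr << "failed to convert component id from part \""
                      << part << "\"\n";
        }
    }
    return ids;
}

int main()
{
    for (int id : parseComponentIds("2,5,x,9")) std::cout << id << "\n"; // prints 2 5 9
}
```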