function out = test_generic_display_results_absviolated(file)
% out = test_generic_display_results_absviolated(file)
%
% displays the results stored in a *_finalresults.mat file and enables
% further analysis and diagnostics of the individual runs

global db_MTTAudioFeatureSlaney08;
global db_magnamixedfeat_genrebasicsm;
global db_MTTMixedFeatureGenreBasicSmPCA;

global comparison;
global comparison_ids;

% ---
% if no file name is given, pick a *_finalresults.mat file from the
% current directory; a numeric argument selects the n-th match
% ---
if nargin < 1 || isempty(file) || isnumeric(file)
    u = dir();
    u = {u.name};
    [idx, strpos] = substrcellfind(u, '_finalresults.mat', 1);

    if exist('file', 'var') && isnumeric(file) && ~isempty(file)
        file = u{idx(file)};
    else
        file = u{idx(1)};
    end
end

load(file);

% ---
% get statistics for feature parameters:
% visualise the accuracy and variance
% ---
if isfield(out, 'inctrain')
    for i = 1:numel(out)

        % ---
        % get training and test sample sizes
        % ---
        n_train_data = zeros(1, numel(out(i).inctrain.dataPartition));
        n_test_data = zeros(1, numel(out(i).inctrain.dataPartition));
        for j = 1:numel(out(i).inctrain.dataPartition)
            n_train_data(j) = mean(out(i).inctrain.dataPartition(j).TrainSize);
            n_test_data(j) = mean(out(i).inctrain.dataPartition(j).TestSize);
        end

        % ---
        % get numbers of violated (lost) constraints
        % ---
        mean_lost_test = 1 - [out(i).inctrain.mean_ok_test];
        mean_lost_test = mean_lost_test(1,:) .* n_test_data;

        var_ok_test = sqrt([out(i).inctrain.var_ok_test]);

        mean_lost_train = 1 - [out(i).inctrain.mean_ok_train];
        mean_lost_train = mean_lost_train(1,:) .* n_train_data;

        % plot test results
        figure;
        subplot(2, 1, 1);
        plot(n_train_data, mean_lost_test, 'r');
        hold on;

        % plot training results
        plot(n_train_data, mean_lost_train, 'm');

        xlabel('# training constraints');
        ylabel('# constraints violated');
        legend('test', 'training');

        % plot +/- one standard deviation around the test curve
        plot(n_train_data, mean_lost_test + var_ok_test(1,:) .* n_test_data, 'r:');
        plot(n_train_data, mean_lost_test - var_ok_test(1,:) .* n_test_data, 'r:');

        % ---
        % get percentage of unknown data examples learnt
        % ---
        lost_test_not_in_train = mean_lost_test - mean_lost_train;
        ntest_not_in_train = n_test_data - n_train_data;

        lost_test_not_in_train = lost_test_not_in_train ./ ntest_not_in_train;

        % avoid NaN where no unknown constraints exist
        lost_test_not_in_train(isnan(lost_test_not_in_train)) = 0;

        subplot(2, 1, 2);
        plot(n_train_data, lost_test_not_in_train);

        xlabel('# training constraints');
        ylabel('% unknown constraints violated');
    end
end

% ---
% write max. training success
% ---
mean_ok_test = [out.mean_ok_test];
[val, idx] = max(mean_ok_test(1,:));
fprintf(' --- Maximal training success: nr. %d, %3.2f percent. --- \n', idx, val * 100);

end
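
% ---
% Usage sketch, assuming the current directory contains one or more
% '*_finalresults.mat' files produced by the surrounding test framework;
% the explicit file name in the last call is hypothetical.
%
%   out = test_generic_display_results_absviolated();      % first matching file
%   out = test_generic_display_results_absviolated(2);     % second matching file
%   out = test_generic_display_results_absviolated('mtt_finalresults.mat');
% ---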