Mercurial > hg > camir-aes2014
diff core/magnatagatune/tests_evals/test_generic_display_results_absviolated.m @ 0:e9a9cd732c1e tip
first hg version after svn
author | wolffd |
---|---|
date | Tue, 10 Feb 2015 15:05:51 +0000 |
parents | |
children |
line wrap: on
line diff
function out = test_generic_display_results_absviolated(file)
% out = test_generic_display_results_absviolated(file)
%
% Displays a "_finalresults.mat" file and enables further analysis and
% diagnostics of the individual runs.
%
% file - (optional) name of the results .mat file, or a numeric index into
%        the "*_finalresults.mat" files found in the current directory.
%        Defaults to the first matching file when omitted or empty.
%
% Returns the "out" results struct array loaded from the file.

% Feature databases shared via globals with the rest of the test framework.
% NOTE(review): none of these globals are read below — presumably they are
% populated as a side effect of loading the results file; confirm.
global db_MTTAudioFeatureSlaney08;
global db_magnamixedfeat_genrebasicsm;
global db_MTTMixedFeatureGenreBasicSmPCA;

global comparison;
global comparison_ids;

% ---
% Resolve the results file: if no file name was given (or a numeric index
% was passed), search the current directory for "*_finalresults.mat" files.
% ---
if nargin < 1 || isempty(file) || isnumeric(file)
    u = dir();
    u = {u.name};
    % substrcellfind is a project-local helper; the second output (string
    % position) was unused and has been dropped.
    idx = substrcellfind(u, '_finalresults.mat', 1);

    if exist('file', 'var') && isnumeric(file)
        % a numeric argument selects the n-th matching file
        file = u{idx(file)};
    else
        file = u{idx(1)};
    end
end

% loads the "out" results struct array from the .mat file
load(file);

% ---
% Get statistics for feature parameters:
% visualise the number of violated constraints (and their variance)
% over increasing training-set sizes, one figure per result entry.
% ---
if isfield(out, 'inctrain')
    for i = 1:numel(out)

        % ---
        % get training and test sample sizes per partition
        % ---
        n_train_data = zeros(1, numel(out(i).inctrain.dataPartition));
        n_test_data = zeros(1, numel(out(i).inctrain.dataPartition));
        for j = 1:numel(out(i).inctrain.dataPartition)
            n_train_data(j) = mean(out(i).inctrain.dataPartition(j).TrainSize);
            n_test_data(j) = mean(out(i).inctrain.dataPartition(j).TestSize);
        end

        % ---
        % convert success rates into absolute numbers of lost
        % (violated) constraints
        % ---
        mean_lost_test = 1 - [out(i).inctrain.mean_ok_test];
        mean_lost_test = mean_lost_test(1,:) .* n_test_data;

        % standard deviation of the test success rate
        var_ok_test = sqrt([out(i).inctrain.var_ok_test]);

        mean_lost_train = 1 - [out(i).inctrain.mean_ok_train];
        mean_lost_train = mean_lost_train(1,:) .* n_train_data;

        % plot test results
        figure;
        subplot(2,1,1)
        plot(n_train_data, mean_lost_test, 'r');
        % "hold on" instead of bare "hold": the original toggled the hold
        % state, which only works because a fresh figure starts with hold
        % off — "hold on" states the intent explicitly and is robust.
        hold on;
        % plot training results
        plot(n_train_data, mean_lost_train, 'm');

        xlabel('# training constraints');
        ylabel('# constraints violated');
        legend('test', 'training');

        % plot +/- one standard deviation band around the test curve
        plot(n_train_data, mean_lost_test + var_ok_test(1,:) .* n_test_data, 'r:');
        plot(n_train_data, mean_lost_test - (var_ok_test(1,:) .* n_test_data), 'r:');

        % ---
        % get percentage of unknown data examples (constraints seen only
        % in the test set) that were violated
        % ---
        lost_test_not_in_train = mean_lost_test - mean_lost_train;
        ntest_not_in_train = n_test_data - n_train_data;

        lost_test_not_in_train = lost_test_not_in_train ./ ntest_not_in_train;

        % 0/0 yields NaN when test and training sets coincide; treat as 0
        lost_test_not_in_train(isnan(lost_test_not_in_train)) = 0;

        subplot(2,1,2)
        plot(n_train_data, lost_test_not_in_train);

        xlabel('# training constraints');
        ylabel('% unknown constraints violated');
    end
end

% ---
% write max. training success to the console
% ---
mean_ok_test = [out.mean_ok_test];
[val, idx] = max(mean_ok_test(1,:));
fprintf(' --- Maximal training success: nr. %d, %3.2f percent. --- \n', idx, val * 100)

end