Mercurial > hg > camir-aes2014
comparison core/magnatagatune/tests_evals/test_generic_display_results_absviolated.m @ 0:e9a9cd732c1e tip
first hg version after svn
author | wolffd |
---|---|
date | Tue, 10 Feb 2015 15:05:51 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:e9a9cd732c1e |
---|---|
function out = test_generic_display_results_absviolated(file)
% out = test_generic_display_results_absviolated(file)
%
% Displays a *_finalresults.mat file and enables further analysis
% and diagnostics of the individual runs.
%
% file: name of a results file, or a numeric index into the
%       *_finalresults.mat files found in the current directory.
%       If omitted or empty, the first matching file is used.
%
% Returns the "out" results struct array loaded from the file and
% produces, per run, a figure of violated-constraint counts over
% increasing training-set size, plus a console line with the
% maximal training success.

global db_MTTAudioFeatureSlaney08;
global db_magnamixedfeat_genrebasicsm;
global db_MTTMixedFeatureGenreBasicSmPCA;


global comparison;
global comparison_ids;

% ---
% resolve the results file: allow a missing argument, an empty
% argument, or a numeric index into the matching directory entries
% ---
if nargin < 1 || isempty(file) || isnumeric(file)
    u = dir();
    u = {u.name};
    idx = substrcellfind(u, '_finalresults.mat', 1);

    if exist('file','var') && isnumeric(file)
        file = u{idx(file)};
    else
        file = u{idx(1)};
    end
end

% loads the variable "out" (used below) into this workspace
load(file);

% ---
% % get statistics for feature parameters
% Visualise the accuracy and variance
% ---
if isfield(out, 'inctrain')
    for i = 1:numel(out)

        % ---
        % get training and test sample sizes
        % ---
        n_part = numel(out(i).inctrain.dataPartition);
        n_train_data = zeros(1, n_part);
        n_test_data = zeros(1, n_part);
        for j = 1:n_part
            n_train_data(j) = mean(out(i).inctrain.dataPartition(j).TrainSize);
            n_test_data(j) = mean(out(i).inctrain.dataPartition(j).TestSize);
        end

        % ---
        % get lost percentages, scaled to absolute numbers of
        % violated constraints
        % ---
        mean_lost_test = 1 - [out(i).inctrain.mean_ok_test];
        mean_lost_test = mean_lost_test(1,:) .* n_test_data;

        % standard deviation of the test success rate
        var_ok_test = sqrt([out(i).inctrain.var_ok_test]);

        mean_lost_train = 1 - [out(i).inctrain.mean_ok_train];
        mean_lost_train = mean_lost_train(1,:) .* n_train_data;

        % plot test results
        figure;
        subplot(2,1,1)
        plot(n_train_data, mean_lost_test,'r');
        hold on;
        % plot training results
        plot(n_train_data, mean_lost_train,'m');

        xlabel ('# training constraints');
        ylabel ('# constraints violated');
        legend ('test','training');

        % +/- one standard deviation band around the test curve
        plot(n_train_data, mean_lost_test + var_ok_test(1,:).* n_test_data,'r:');
        plot(n_train_data, mean_lost_test - (var_ok_test(1,:).* n_test_data),'r:');

        % ---
        % get percentage of unknown data examples learnt
        % ---
        lost_test_not_in_train = mean_lost_test - mean_lost_train;
        ntest_not_in_train = n_test_data - n_train_data;

        lost_test_not_in_train = lost_test_not_in_train ./ ntest_not_in_train;

        % 0/0 cases (no test-only constraints) yield NaN; show as zero
        lost_test_not_in_train(isnan(lost_test_not_in_train)) = 0;

        subplot(2,1,2)
        plot(n_train_data, lost_test_not_in_train);

        xlabel ('# training constraints');
        ylabel ('% unknown constraints violated');
    end
end


% ---
% write max. training success
% ---
mean_ok_test = [out.mean_ok_test];
[val, idx] = max(mean_ok_test(1,:));
fprintf(' --- Maximal training success: nr. %d, %3.2f percent. --- \n', idx, val * 100)

end
99 end |