annotate core/tools/test_generic_significance_signrank.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
parents
children
rev   line source
function [p, med, avg, stats] = test_generic_significance_signrank(file1,run1,file2,run2, weighted, mode)
%
% [p, med, avg, stats] = test_generic_significance_signrank(input1, run1, input2, run2, weighted, mode)
%
% Wilcoxon signed rank test comparing the cross-validation results of
% two test runs.
%
% @param file1, file2: result files handed to test_generic_display_results
% @param run1, run2:   run indices within each file; for mode
%                      'join_datasets', empty or < 1 selects all runs
% @param weighted:     0/1, selects row (weighted+1) of diag.ok_test
% @param mode: string determining the way results are preprocessed:
% '', 'avgbase', {'join_datasets',[sets2join]}
%
% @return p:     signed-rank p-value of the performance differences
% @return med:   median of the performance differences
% @return avg:   mean of the performance differences
% @return stats: statistics structure returned by signrank
%
% get wilcoxon signed rank of two test runs

if nargin < 5
    weighted = 0;
end
if nargin < 6
    % BUGFIX: the original assigned an unused variable (avgbase) here,
    % so a 5-argument call crashed at "switch mode" on an undefined mode
    mode = '';
end

% get individual cross-validation results
[out1, ~, ~, indi1] = test_generic_display_results(file1, 0, 0); %#ok<ASGLU>

[out2, ~, ~, indi2] = test_generic_display_results(file2, 0, 0); %#ok<ASGLU>
switch mode
    case 'avgbase'
        % compare run1's individual results against the mean of run2
        %out2(run2).mean_ok_test
        perf1 = indi1(run1).diag.ok_test(weighted+1,:);
        perf2 = indi2(run2).diag.ok_test(weighted+1,:);
        p2avg = mean(perf2);
        perf_diffs = perf1 - p2avg;

    case 'join_datasets'
        %out2(run2).mean_ok_test

        % get the dataset indices which are to join
        % BUGFIX: use short-circuit ||; with the elementwise |, an empty
        % run index yields "true | []" == [], and "if []" is false, so
        % the wrong branch was taken
        if isempty(run1) || (run1 < 1)
            sets2join1 = 1:numel(indi1);
        else
            sets2join1 = run1;
        end
        if isempty(run2) || (run2 < 1)
            sets2join2 = 1:numel(indi2);
        else
            sets2join2 = run2;
        end

        perf1 = join_datasets(indi1,weighted,sets2join1);
        perf2 = join_datasets(indi2,weighted,sets2join2);
        perf_diffs = perf1 - perf2;

    otherwise
        % get difference of individual results
        perf_diffs = indi1(run1).diag.ok_test(weighted+1,:) - indi2(run2).diag.ok_test(weighted+1,:);
end

% Wilcoxon signed rank test on the paired differences
[p, h, stats] = signrank(perf_diffs );
% [p, h] = signtest(perf1,perf2);
% [h, p, stats] = kstest2(perf1,perf2);
% [h2,p2] = ttest(perf_diffs);

% get median of performance difference
med = median(perf_diffs);

% get mean of performance difference
avg = mean(perf_diffs);

% output interpretation
if h && med > 0
    fprintf('input 1 >> input 2 :)\n');
elseif h && med < 0
    fprintf('input 2 >> input 1 ;)\n');
else
    fprintf('no significant difference :( \n');
end
wolffd@0 76
wolffd@0 77
wolffd@0 78 % ---
wolffd@0 79 % joins the test data to a single performance vector
wolffd@0 80 % ---
function perf = join_datasets(indi,weighted, sets2join)
% Concatenate the per-fold test results of the selected datasets into a
% single row vector; row (weighted+1) of diag.ok_test is taken from each
% dataset in sets2join, in order.
chunks = arrayfun(@(k) indi(k).diag.ok_test(weighted+1,:), ...
    sets2join, 'UniformOutput', false);
perf = [chunks{:}];
wolffd@0 87
wolffd@0 88