function [p, med, avg, stats] = test_generic_significance_signrank(file1, run1, file2, run2, weighted, mode)
%
% [p, med, avg, stats] = test_generic_significance_signrank(file1, run1, file2, run2, weighted, mode)
%
% @param weighted: 0 or 1, selects row (weighted + 1) of the individual
%              test results
% @param mode: string determining the way results are preprocessed:
%              ''              - per-fold differences of run1 and run2
%              'avgbase'       - run1 folds against the mean of run2
%              'join_datasets' - concatenate the folds of the runs given by
%                                run1 / run2 (all runs if empty or < 1)
%
% get the Wilcoxon signed rank test of two test runs
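%
% Example call (file names and run indices are hypothetical placeholders,
% not actual result files from this repository):
%   [p, med] = test_generic_significance_signrank(...
%       'results_algo1.mat', 1, 'results_algo2.mat', 1, 0, 'avgbase');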

if nargin < 5
    weighted = 0;
end
if nargin < 6
    mode = '';
end



% get individual cross-validation results
[out1, ~, ~, indi1] = test_generic_display_results(file1, 0, 0);

[out2, ~, ~, indi2] = test_generic_display_results(file2, 0, 0);
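% indi(i).diag.ok_test appears to hold one row per weighting scheme and one
% column per cross-validation fold; row (weighted + 1) is selected below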
switch mode
    case 'avgbase'
    
        %out2(run2).mean_ok_test
        perf1 = indi1(run1).diag.ok_test(weighted+1,:);
        perf2 = indi2(run2).diag.ok_test(weighted+1,:);
        p2avg = mean(perf2);
        perf_diffs = perf1 - p2avg;


    case 'join_datasets'
        %out2(run2).mean_ok_test
        
        % get the dataset indices which are to join
        if isempty(run1) || (run1 < 1)
            sets2join1 = 1:numel(indi1);
        else
            sets2join1 = run1;
        end
        if isempty(run2) || (run2 < 1)
            sets2join2 = 1:numel(indi2);
        else
            sets2join2 = run2;
        end
        
        perf1 = join_datasets(indi1,weighted,sets2join1);
        perf2 = join_datasets(indi2,weighted,sets2join2);
        perf_diffs = perf1 - perf2;

    otherwise
        % get difference of individual results
        perf_diffs = indi1(run1).diag.ok_test(weighted+1,:) - indi2(run2).diag.ok_test(weighted+1,:);       
end

[p, h, stats] = signrank(perf_diffs );
% [p, h] = signtest(perf1,perf2);
% [h, p, stats] = kstest2(perf1,perf2);
% [h2,p2] = ttest(perf_diffs);
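
% illustrative sketch (toy values, not real results): the signed rank test
% operates on the vector of per-fold differences, e.g.
%   d = [0.02 -0.01 0.03 0.00 0.05];
%   [p_toy, h_toy] = signrank(d); % h_toy == 1 rejects equal medians at 5%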
        
% get median of performance difference
med = median(perf_diffs);

% get mean of performance difference
avg = mean(perf_diffs);

% output interpretation
if h && med > 0
    fprintf('input 1 >> input 2 :)\n');
elseif h && med < 0
    fprintf('input 2 >> input 1 ;)\n');
else
    fprintf('no significant difference :( \n');
end


% ---
% joins the test data to a single performance vector
% ---
function perf = join_datasets(indi,weighted, sets2join)
perf = [];

for i = sets2join
    perf = [perf indi(i).diag.ok_test(weighted+1,:)];
end