annotate do_correlation_analyses.m @ 3:c5c97558fb2f

Added example output pictures.
author Jordan Smith <jordan.smith@eecs.qmul.ac.uk>
date Fri, 20 Sep 2013 17:12:46 +0100
parents 624231da830b
children 92b5a46bc67b
rev   line source
jordan@1 1 % Assuming we have followed the factory settings so far, we now have four datasets,
jordan@1 2 % and a whole lot of evaluation metrics. But note that in one evaluation (no. 2, mrx10_1),
jordan@1 3 % we do not want to consider any metrics related to labels, since the ground truth in this
jordan@1 4 % case had arbitrary labels. (It was done using boundary-only IRISA annotations.)
jordan@1 5 % So, we make two sets of indices, LAB_MEASURES and SEG_MEASURES. They are handy.
jordan@1 6 lab_measures = ismember(mirex_dset_origin,[1 3 4]);
jordan@1 7 seg_measures = ismember(mirex_dset_origin,[1 2 3 4]);
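% Quick sanity check (a sketch using only the masks defined above): since the two index
% sets differ only by the mrx10_1 songs, the following should evaluate to true (1).
sum(seg_measures) - sum(lab_measures) == sum(ismember(mirex_dset_origin,2))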
jordan@1 8
jordan@1 9 % Now we can do our correlation studies!
jordan@1 10 % First, generate figure 1a. For that, we call the function DO_CORRELATION.
jordan@1 11 % Type HELP DO_CORRELATION to understand what all the arguments mean... The short of it
jordan@1 12 % is that we select the songs, metrics and algorithms to compare, and then choose
jordan@1 13 % whether to take the median across all songs or across all algorithms.
jordan@1 14
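% For reference, a restatement of the fig1a call below with named flags (a sketch, not
% executed; the argument order follows HELP DO_CORRELATION and the signature comment
% further down: ..., algo_groups, merge_algos, merge_songs, merge_dsets, metric_labels,
% and the trailing 0.05 is presumably the significance level):
% algo_groups = 0; merge_algos = 0; merge_songs = 1; merge_dsets = 0; alpha = 0.05;
% [asig pval a a_] = do_correlation(megadatacube, lab_measures, indexing_info(1).manual_set, [1:9],...
%     algo_groups, merge_algos, merge_songs, merge_dsets, indexing_info(1).labels, alpha);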
jordan@2 15 [asig pval a a_] = do_correlation(megadatacube, lab_measures, indexing_info(1).manual_set, [1:9],...
jordan@1 16 0, 0, 1, 0, indexing_info(1).labels, 0.05);
jordan@1 17 saveas(gcf,'./plots/fig1a.jpg')
jordan@1 18
jordan@2 19 [asig pval a a_] = do_correlation(megadatacube, lab_measures, indexing_info(1).manual_set, [1:9],...
jordan@1 20 0, 1, 0, 0, indexing_info(1).labels, 0.05);
jordan@1 21 saveas(gcf,'./plots/fig1b.jpg')
jordan@1 22
jordan@2 23 [asig pval a a_] = do_correlation(megadatacube, seg_measures, indexing_info(2).manual_set, [1:9],...
jordan@1 24 0, 0, 1, 0, indexing_info(2).labels, 0.05);
jordan@1 25 saveas(gcf,'./plots/fig2a.jpg')
jordan@1 26
jordan@2 27 [asig pval a a_] = do_correlation(megadatacube, seg_measures, indexing_info(2).manual_set, [1:9],...
jordan@1 28 0, 1, 0, 0, indexing_info(2).labels, 0.05);
jordan@1 29 saveas(gcf,'./plots/fig2b.jpg')
jordan@1 30
jordan@2 31 [asig pval a a_] = do_correlation_fig3_only(megadatacube, lab_measures, [indexing_info(1).manual_set indexing_info(2).manual_set], [1:9], 0, 1, 0, 0, indexing_info(2).all_labels([indexing_info(1).manual_set indexing_info(2).manual_set]), 1, indexing_info(3).manual_set, indexing_info(3).labels);
jordan@1 32 saveas(gcf,'./plots/fig3.jpg')
jordan@1 33
jordan@1 34
jordan@1 35 error('Halting here: everything below this point is still under construction.')
jordan@1 36 % % % % % % % % % % % % The rest of this script is still under construction, so the line above deliberately raises an error to halt execution here.
jordan@1 37
jordan@1 38 % Are the trends qualitatively similar across datasets?
jordan@1 39 % Fig 1a
jordan@2 40 figure,[asig pval a a_] = do_correlation(megadatacube, lab_measures, indexing_info(1).manual_set, [1:9], -1, 0, 1, -1, indexing_info(1).labels, 1);
jordan@2 41 figure,[asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,1), indexing_info(1).manual_set, [1:9], -1, 0, 1, -1, indexing_info(1).labels, 1);
jordan@2 42 figure,[asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,3), indexing_info(1).manual_set, [1:9], -1, 0, 1, -1, indexing_info(1).labels, 1);
jordan@2 43 figure,[asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,4), indexing_info(1).manual_set, [1:9], -1, 0, 1, -1, indexing_info(1).labels, 1);
jordan@1 44 % Fig 1b
jordan@2 45 figure, [asig pval a a_] = do_correlation(megadatacube, lab_measures, sind_manual1, [1:9], -1, 1, 0, -1, indexing_info(1).labels, 1);
jordan@2 46 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,1), indexing_info(1).manual_set, [1:9], -1, 1, 0, -1, indexing_info(1).labels, 1);
jordan@2 47 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,3), indexing_info(1).manual_set, [1:9], -1, 1, 0, -1, indexing_info(1).labels, 1);
jordan@2 48 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,4), indexing_info(1).manual_set, [1:9], -1, 1, 0, -1, indexing_info(1).labels, 1);
jordan@1 49 % Fig 2a
jordan@2 50 figure, [asig pval a a_] = do_correlation(megadatacube, seg_measures, sind_manual2, [1:9], -1, 0, 1, -1, indexing_info(2).labels, 1);
jordan@2 51 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,1), indexing_info(2).manual_set, [1:9], -1, 0, 1, -1, indexing_info(2).labels, 1);
jordan@2 52 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,2), indexing_info(2).manual_set, [1:9], -1, 0, 1, -1, indexing_info(2).labels, 1);
jordan@2 53 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,3), indexing_info(2).manual_set, [1:9], -1, 0, 1, -1, indexing_info(2).labels, 1);
jordan@2 54 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,4), indexing_info(2).manual_set, [1:9], -1, 0, 1, -1, indexing_info(2).labels, 1);
jordan@1 55 % Fig 2b
jordan@2 56 figure, [asig pval a a_] = do_correlation(megadatacube, seg_measures, sind_manual2, [1:9], -1, 1, 0, -1, indexing_info(2).labels, 1);
jordan@2 57 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,1), indexing_info(2).manual_set, [1:9], -1, 1, 0, -1, indexing_info(2).labels, 1);
jordan@2 58 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,2), indexing_info(2).manual_set, [1:9], -1, 1, 0, -1, indexing_info(2).labels, 1);
jordan@2 59 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,3), indexing_info(2).manual_set, [1:9], -1, 1, 0, -1, indexing_info(2).labels, 1);
jordan@2 60 figure, [asig pval a a_] = do_correlation(megadatacube, ismember(mirex_dset_origin,4), indexing_info(2).manual_set, [1:9], -1, 1, 0, -1, indexing_info(2).labels, 1);
jordan@1 61
jordan@1 62
jordan@1 63 % "Does this indicate that the algorithms are better at boundary precision than recall? In fact, the opposite is the case: average bp6 and bp.5 were simply consistently worse for most algorithms."
jordan@1 64 % For all algos:
jordan@1 65 mean(median(megadatacube(:,sind_manual2,:),3),1)
jordan@1 66 % For each algo:
jordan@1 67 mean(megadatacube(:,sind_manual2,:),1)
jordan@1 68
jordan@1 69
jordan@1 70 H = boxplot(megadatacube(:,[17 21],:))
jordan@1 71
jordan@1 72 tmp = sort(megadatacube(:,17,:));
jordan@1 73 tmp2 = sort(megadatacube(:,21,:));
jordan@1 74 tmp2(round(length(tmp2)/4),:,:), tmp2(round(length(tmp2)*3/4),:,:)
jordan@1 75
jordan@1 76 tmp2 = sort(tmp2(:));
jordan@1 77 tmp2(round(length(tmp2)/4)), tmp2(round(length(tmp2)*3/4))
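% Approximately equivalent, if the Statistics Toolbox is available (a sketch; PRCTILE
% interpolates, so the values can differ slightly from the index-based quartiles above):
prctile(reshape(megadatacube(:,21,:),[],1), [25 75])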
jordan@1 78
jordan@1 79
jordan@1 80 % % % % % % % % % % % END OF REAL WORK AREA % % % % % % % % % % % % %
jordan@1 81
jordan@1 82
jordan@1 83 clf,imagesc(a.*(abs(a)>.7))
jordan@1 84 set(gca,'XTickLabel',[],'XTick',(1:50)-.5)
jordan@1 85 set(gca,'YTickLabel',s,'YTick',(1:50))
jordan@1 86 t = text((1:50)-.5,51*ones(1,50),s);
jordan@1 87 set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation',90);
jordan@1 88 hold on
jordan@1 89 for i=1:9,
jordan@1 90 plot([0 50],[i*5 i*5],'w')
jordan@1 91 plot([i*5 i*5],[0 50],'w')
jordan@1 92 end
jordan@1 93
jordan@1 94 % a = corr([datacube(1:300,:,1) newcube(1:300,:,1) newmetriccube(1:300,:,1)]);
jordan@1 95
jordan@1 96 a = corr([datacube(lab_measures,:,1) newcube(lab_measures,:,1) newmetriccube(lab_measures,:,1)]);
jordan@1 97 b = corr([datacube(seg_measures,:,1) newcube(seg_measures,:,1) newmetriccube(seg_measures,:,1)]);
jordan@1 98
jordan@1 99 % Look at label measures only in this case.
jordan@1 100 imagesc(sortrows(transpose(sortrows((abs(a)>0.7)))))
jordan@1 101 [t1 t2] = sortrows(transpose(sortrows(abs(a)>0.7)));
jordan@1 102
jordan@1 103
jordan@1 104 b = zeros(size(a));
jordan@1 105 for j=[3,4,5,6,7,9],
jordan@1 106 b = b+corr([datacube(:,:,j) newcube(:,:,j) newmetriccube(:,:,j)]);
jordan@1 107 end
jordan@1 108 b=b/6;
jordan@1 109
jordan@1 110
jordan@1 111 % Look at correlations among all figures, but pay attention to pvalues too.
jordan@1 112 % Only plot those less than 0.05, with conservative bonferroni correction.
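% To spell out the correction used below (illustrative arithmetic): with k metrics there
% are m = k*(k-1)/2 distinct metric pairs, so a raw p-value p is kept only if p*m < 0.05,
% i.e. p < 0.05/m. For example, 30 metrics give m = 435 and a per-pair threshold of
% roughly 1.1e-4.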
jordan@1 113 megadatacube_l = [datacube(lab_measures,:,:) newcube(lab_measures,:,:) newmetriccube(lab_measures,:,:)];
jordan@1 114 megadatacube_s = [datacube(seg_measures,:,:) newcube(seg_measures,:,:) newmetriccube(seg_measures,:,:)];
jordan@1 115 % megadatacube_l = median(megadatacube_l(:,use_these_labels,:),3);
jordan@1 116 % megadatacube_s = median(megadatacube_s(:,use_these_segs,:),3);
jordan@1 117
jordan@1 118
jordan@1 119
jordan@1 120 megadatacube_all = median(megadatacube_l(:,[use_these_labels use_these_segs use_these_extras],:),3);
jordan@1 121 megadatacube_all(:,16:17) = 1 - megadatacube_all(:,16:17);
jordan@1 122 [al pval] = corr(megadatacube_all);
jordan@1 123 m = length(al)*(length(al)-1)/2;
jordan@1 124 imagesc(al.*((pval*m)<0.05))
jordan@1 125 al_ = al.*((pval*m)<0.05);
jordan@1 126 al_ = tril(al_ .* (abs(al_)>.5));
jordan@1 127 imagesc(al_)
jordan@1 128 for i=1:length(al_),
jordan@1 129 for j=1:length(al_),
jordan@1 130 if (al_(i,j)~=0) && (i~=j),
jordan@1 131 text(j-.35,i,num2str(al_(i,j),2))
jordan@1 132 end
jordan@1 133 end
jordan@1 134 end
jordan@1 135 [bl pvbl] = corr(megadatacube_all,'type','Kendall'); % Kendall tau version, used by the plots below
jordan@1 136 m = length(bl)*(length(bl)-1)/2;
jordan@1 137 imagesc(bl.*((pvbl*m)<0.05))
jordan@1 138 bl_ = bl.*((pvbl*m)<0.05);
jordan@1 139 bl_ = tril(bl_); % .* (abs(bl_)>.0);
jordan@1 140 imagesc(bl_)
jordan@1 141 for i=1:length(bl_),
jordan@1 142 for j=1:length(bl_),
jordan@1 143 if (bl_(i,j)~=0) && (i~=j),
jordan@1 144 text(j-.35,i,num2str(bl_(i,j),2))
jordan@1 145 end
jordan@1 146 end
jordan@1 147 end
jordan@1 148
jordan@1 149 % Or, we could do this: Take all the computed Kendall taus, i.e., the non-diagonal elements of bl.
jordan@1 150 taus = bl(find(bl<1));
jordan@1 151 taus = taus-mean(taus);
jordan@1 152 taus = taus/std(taus);
jordan@1 153 P = normcdf(-abs(taus));
jordan@1 154 ind = find(P<=0.05);
jordan@1 155 taus = bl(find(bl<1));
jordan@1 156 taus(ind)
jordan@1 157
jordan@1 158 c = colormap;
jordan@1 159 c(32,:) = [1 1 1];
jordan@1 160 c(31,:) = [1 1 1];
jordan@1 161 c = min(1,c*1.6);
jordan@1 162 colormap(c)
jordan@1 163 set(gca,'XTickLabel',[],'XTick',(1:length(al_))-.4)
jordan@1 164 set(gca,'YTickLabel',s([use_these_labels use_these_segs use_these_extras]),'YTick',(1:length(al_)))
jordan@1 165 t = text((1:length(al_))-.3,(length(al_)+1)*ones(1,length(al_))+.3,s([use_these_labels use_these_segs use_these_extras]));
jordan@1 166 set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation',90);
jordan@1 167 axis([0 31 0 31])
jordan@1 168 saveas(gcf,'./plots/all_correlations.jpg')
jordan@1 169
jordan@1 170 s = {'S_o','S_u','pw_f','pw_p','pw_r','rand','bf1','bp1','br1','bf6','bp6','br6','mt2c','mc2t','ds','len','nsa','nla','msla','nspla','nse','nle','msle','nsple','ob','ol','pw_f_x','pw_p_x','pw_r_x','K','asp','acp','I_AE_x','H_EA_x','H_AE_x','S_o_x','S_u_x','rand','mt2c_x','mc2t_x','m','f','d_ae_x','d_ea_x','b_f1_x','b_p1_x','b_r1_x','b_f6_x','b_p6_x','b_r6_x'};
jordan@1 171 s_type = [1,2,3,1,2,3,6,4,5,6,4,5,4,5, 7,7,7,7,7,7,7,7,7,7,7,7,3,1,2,3,2,1,3,1,2,1,2, 3,4,5,5,4,7,7,3,1,2,3,1,2];
jordan@1 172 megadatacube_s(:,40:41,:) = 1 - megadatacube_s(:,40:41,:);
jordan@1 173 megadatacube_s(:,51,:) = 2*megadatacube_s(:,38,:).*megadatacube_s(:,39,:)./(megadatacube_s(:,38,:)+megadatacube_s(:,39,:));
jordan@1 174 % This makes a new 51st metric: the harmonic mean (an F-measure-style combination) of m and f.
jordan@1 175 s_type(51) = 6;
jordan@1 176 s{51} = 'mf';
jordan@1 177
jordan@1 178
jordan@1 179 % [a pval] = corr(median([datacube(lab_measures,:,1) newcube(lab_measures,:,1) newmetriccube(lab_measures,:,1)],3));
jordan@1 180 [a pval] = corr(mean(megadatacube_l,3));
jordan@1 181 m = length(a)*(length(a)-1)/2;
jordan@1 182 imagesc(a.*((pval*m)<0.05))
jordan@1 183 a_ = a.*((pval*m)<0.05);
jordan@1 184 c = colormap;
jordan@1 185 c(32,:) = [1 1 1];
jordan@1 186 colormap(c)
jordan@1 187
jordan@1 188 % I want to make a claim about whether song length correlates with algorithm performance. Let us make sure it holds across all algorithms, and is not just an artefact of taking the median:
jordan@1 189 for j=1:9,
jordan@1 190 a = corr([datacube(lab_measures,:,j) newcube(lab_measures,:,j) newmetriccube(lab_measures,:,j)]);
jordan@1 191 a(16,[17 19 21 23])
jordan@1 192 end
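% A small variant of the check above (sketch; len_corr is a new throwaway name): collect
% the same correlation entries into a 9x4 matrix so the per-algorithm values can be
% compared at a glance. Indices 16 and [17 19 21 23] are the same ones used just above.
len_corr = zeros(9,4);
for j=1:9,
a = corr([datacube(lab_measures,:,j) newcube(lab_measures,:,j) newmetriccube(lab_measures,:,j)]);
len_corr(j,:) = a(16,[17 19 21 23]);
end
len_corr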
jordan@1 193
jordan@1 194 % BoxPlot of the number of segments in each algorithm output
jordan@1 195 boxplot(reshape(newcube(:,7,:),[length(newcube),9,1]))
jordan@1 196
jordan@1 197 % Look at best 10 and worst 10 songs in each dataset, according to PW_F metric.
jordan@1 198 % Average results across algorithms for this one.
jordan@1 199 unique_algorithms = [3 4 5 6 7];
jordan@1 200 tmp = datacube;
jordan@1 201 tmp(:,:,3) = mean(tmp(:,:,[1:3,9]),3);
jordan@1 202 tmp(:,:,7) = mean(tmp(:,:,7:8),3);
jordan@1 203 tmp = mean(tmp(lab_measures,:,unique_algorithms),3);
jordan@1 204 [tmp1 order] = sortrows(tmp,-3);
jordan@1 205 lab_idx = find(lab_measures); order1 = lab_idx(order); % map rows of the lab-only subset back to original song indices
jordan@1 206 pub_songids = X.mir2pub(order1);
jordan@1 207 values = tmp1((pub_songids>0),3);
jordan@1 208 filenames = {};
jordan@1 209 for i=1:length(pub_songids),
jordan@1 210 if pub_songids(i)>0,
jordan@1 211 filenames{end+1} = X.pubanns(pub_songids(i)).file;
jordan@1 212 end
jordan@1 213 end
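% With the rows of tmp1 sorted by descending PW_F (sort key -3 above), the best and worst
% songs of this subset can be read straight off the sorted order (a sketch; best10_ids and
% worst10_ids are new throwaway names):
best10_ids = order1(1:10);
worst10_ids = order1(end-9:end);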
jordan@1 214
jordan@1 215 mirid = pub2mir(336);
jordan@1 216 make_structure_image(mirid, miranns, MD, mirdset, X, MR)
jordan@1 217 saveas(gcf,'./plots/MJ_dont_care.jpg')
jordan@1 218 make_structure_image(121, miranns, MD, mirdset, X, MR)
jordan@1 219 saveas(gcf,'./plots/play_the_game.jpg')
jordan@1 220
jordan@1 221 % Plot difficulty by album: (the by-album plot is not written yet; the block below breaks results down by SALAMI genre and subgenre instead)
jordan@1 222
jordan@1 223
jordan@1 224 genres = {};
jordan@1 225 subgenres = {};
jordan@1 226 issalami = zeros(length(filenames),1);
jordan@1 227 for i=1:length(filenames),
jordan@1 228 file = filenames{i};
jordan@1 229 if strfind(file,'SALAMI_data'),
jordan@1 230 issalami(i)=1;
jordan@1 231 salami_id = file(79:85);
jordan@1 232 salami_id = salami_id(1:strfind(salami_id,'/')-1);
jordan@1 233 salami_row = find(aaux.metadata{1}==str2num(salami_id));
jordan@1 234 genres{end+1} = cell2mat(aaux.metadata{15}(salami_row));
jordan@1 235 subgenres{end+1} = cell2mat(aaux.metadata{16}(salami_row));
jordan@1 236 end
jordan@1 237 end
jordan@1 238 gs = grp2idx(genres);
jordan@1 239 subgs = grp2idx(subgenres);
jordan@1 240 boxplot(values(find(issalami)),transpose(genres))
jordan@1 241 axis([0.5 5.5 0 1])
jordan@1 242 saveas(gcf,'salami_breakdown.png')
jordan@1 243 cmap = colormap; boxplot(values(find(issalami)),transpose(subgenres),'colors',cmap(round(gs*63/6),:),'orientation','horizontal') % cmap was not defined above; assuming the current colormap is meant
jordan@1 244
jordan@1 245 [tmp1 tmp2] = hist(subgs,max(subgs)-1);
jordan@1 246 tmp1 = find(tmp1>5); % do these subgenres only
jordan@1 247 tmp1 = ismember(subgs,tmp1);
jordan@1 248 tmp2 = find(issalami);
jordan@1 249 boxplot(values(tmp2(tmp1)),transpose(subgenres(tmp1)),'colors',cmap(round(gs(tmp1)*63/6),:),'orientation','horizontal')
jordan@1 250
jordan@1 251
jordan@1 252
jordan@1 253
jordan@1 254
jordan@1 255 % Look at scatter plots so that we can qualitatively attribute the correlations to things (e.g., low-precision variance).
jordan@1 256 tmpcube = mean(datacube,3);
jordan@1 257 for i=1:4,
jordan@1 258 for j=i+1:5,
jordan@1 259 subplot(5,5,i+(j-1)*5)
jordan@1 260 scatter(tmpcube(:,i),tmpcube(:,j),'x')
jordan@1 261 end
jordan@1 262 end
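% The same grid with labelled axes (a sketch; it assumes the columns of datacube line up
% with the first entries of the metric-name cell array s defined above):
for i=1:4,
for j=i+1:5,
subplot(5,5,i+(j-1)*5)
scatter(tmpcube(:,i),tmpcube(:,j),'x')
xlabel(s{i}), ylabel(s{j})
end
end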
jordan@1 263
jordan@1 264
jordan@1 265
jordan@1 266
jordan@1 267
jordan@1 268
jordan@1 269
jordan@1 270
jordan@1 271
jordan@1 272
jordan@1 273
jordan@1 274
jordan@1 275
jordan@1 276
jordan@1 277
jordan@1 278
jordan@1 279
jordan@1 280
jordan@1 281
jordan@1 282
jordan@1 283
jordan@1 284
jordan@1 285
jordan@1 286
jordan@1 287
jordan@1 288
jordan@1 289
jordan@1 290
jordan@1 291
jordan@1 292
jordan@1 293
jordan@1 294
jordan@1 295
jordan@1 296
jordan@1 297
jordan@1 298
jordan@1 299
jordan@1 300
jordan@1 301
jordan@1 302
jordan@1 303
jordan@1 304
jordan@1 305
jordan@1 306
jordan@1 307
jordan@1 308
jordan@1 309
jordan@1 310
jordan@1 311
jordan@1 312 % Now again, we will want to run the correlation study by taking medians across algorithms (do the metrics rank the songs the same way?) and medians across songs (do the metrics rank the algorithms the same way?).
jordan@1 313
jordan@1 314 % Take the label metrics only, take median across songs:
jordan@1 315 % tmpcube = median(megadatacube_l(:,sind_manual1,:),1);
jordan@1 316 % tmpcube = transpose(reshape(tmpcube,size(tmpcube,2),size(tmpcube,3)));
jordan@1 317 % [a pval] = corr(tmpcube,'type','Kendall');
jordan@1 318 % m = length(a)*(length(a)-1)/2;
jordan@1 319 % a.*((pval*m)<0.05); % This is the matrix of values that are significant.
jordan@1 320 % Alternatively, we can plot all the metrics, treat them as random normal variables, and select only those that stand out.
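% A minimal sketch of that alternative, mirroring the tau z-scoring used earlier
% (commented out like the block above; it assumes a correlation matrix a as in those lines):
% vals = a(a < 1);                        % off-diagonal entries (the diagonal is exactly 1)
% z = (vals - mean(vals)) / std(vals);    % treat the coefficients as roughly normal
% standouts = vals(abs(z) > 2);           % keep only those ~2 s.d. away from the mean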
jordan@1 321
jordan@1 322
jordan@1 323
jordan@1 324 % [asig pval a] = do_correlation(megadatacube, songs, metrics, algos, algo_groups, merge_algos (1 = do, 0 = do not), merge_songs, merge_dsets, metric_labels)
jordan@1 325 [asig pval a] = do_correlation(megadatacube, lab_measures, sind_manual1, [1:9], -1, 0, 1, -1, s_manual1)
jordan@1 326 [asig pval a] = do_correlation(megadatacube, lab_measures, [use_these_labels use_these_segs], [1:9], -1, 0, 1, -1, s([use_these_labels use_these_segs]))
jordan@1 327
jordan@1 328 [asig pval a] = do_correlation(megadatacube, lab_measures, [1:12], [1:9], -1, 0, 1, -1, s(1:12))
jordan@1 329
jordan@1 330
jordan@1 331 [a pval] = corr(megadatacube_l(:,:,1),'type','Kendall');
jordan@1 332
jordan@1 333
jordan@1 334
jordan@1 335 % Take the label metrics only, take median across algorithms:
jordan@1 336 tmpcube = median(megadatacube_l(:,sind_manual1,:),3);
jordan@1 337 [a pval] = corr(tmpcube); %,'type','Kendall');
jordan@1 338 m = length(a)*(length(a)-1)/2;
jordan@1 339 a.*((pval*m)<0.05); % This is the matrix of values that are significant.
jordan@1 340 % However, with so many data points (over 1400) it is very easy to be significant...
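% Illustrative arithmetic: for a Pearson correlation over n samples, significance at the
% 0.05 level requires roughly |r| > 1.96/sqrt(n-2), so with n of about 1400 even |r| of
% about 0.05 already comes out "significant" before any correction.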
jordan@1 341
jordan@1 342
jordan@1 343
jordan@1 344
jordan@1 345 imagesc(a.*((pval*m)<0.05))
jordan@1 346 a_ = a.*((pval*m)<0.05);
jordan@1 347 a_ = tril(a_ .* (abs(a_)>.5));
jordan@1 348 imagesc(a_)
jordan@1 349 for i=1:length(a_),
jordan@1 350 for j=1:length(a_),
jordan@1 351 if (a_(i,j)~=0) && (i~=j),
jordan@1 352 text(j-.35,i,num2str(a_(i,j),2))
jordan@1 353 end
jordan@1 354 end
jordan@1 355 end
jordan@1 356
jordan@1 357
jordan@1 358 clf,imagesc(a.*(abs(a)>.7))
jordan@1 359 set(gca,'XTickLabel',[],'XTick',(1:50)-.5)
jordan@1 360 set(gca,'YTickLabel',s,'YTick',(1:50))
jordan@1 361 t = text((1:50)-.5,51*ones(1,50),s);
jordan@1 362 set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation',90);
jordan@1 363 hold on
jordan@1 364 for i=1:9,
jordan@1 365 plot([0 50],[i*5 i*5],'w')
jordan@1 366 plot([i*5 i*5],[0 50],'w')
jordan@1 367 end
jordan@1 368
jordan@1 369 % a = corr([datacube(1:300,:,1) newcube(1:300,:,1) extracube(1:300,:,1)]);
jordan@1 370
jordan@1 371 a = corr([datacube(lab_measures,:,1) newcube(lab_measures,:,1) extracube(lab_measures,:,1)]);
jordan@1 372 b = corr([datacube(seg_measures,:,1) newcube(seg_measures,:,1) extracube(seg_measures,:,1)]);
jordan@1 373
jordan@1 374 % Look at label measures only in this case.
jordan@1 375 imagesc(sortrows(transpose(sortrows((abs(a)>0.7)))))
jordan@1 376 [t1 t2] = sortrows(transpose(sortrows(abs(a)>0.7)));
jordan@1 377
jordan@1 378
jordan@1 379 b = zeros(size(a));
jordan@1 380 for j=[3,4,5,6,7,9],
jordan@1 381 b = b+corr([datacube(:,:,j) newcube(:,:,j) extracube(:,:,j)]);
jordan@1 382 end
jordan@1 383 b=b/6;
jordan@1 384
jordan@1 385
jordan@1 386 % Look at correlations among all figures, but pay attention to pvalues too.
jordan@1 387 % Only plot those less than 0.05, with conservative bonferroni correction.
jordan@1 388 megadatacube_l = [datacube(lab_measures,:,:) newcube(lab_measures,:,:) extracube(lab_measures,:,:)];
jordan@1 389 megadatacube_s = [datacube(seg_measures,:,:) newcube(seg_measures,:,:) extracube(seg_measures,:,:)];
jordan@1 390 % megadatacube_l = median(megadatacube_l(:,use_these_labels,:),3);
jordan@1 391 % megadatacube_s = median(megadatacube_s(:,use_these_segs,:),3);
jordan@1 392
jordan@1 393
jordan@1 394
jordan@1 395 megadatacube_all = median(megadatacube_l(:,[use_these_labels use_these_segs use_these_extras],:),3);
jordan@1 396 megadatacube_all(:,16:17) = 1 - megadatacube_all(:,16:17);
jordan@1 397 [al pval] = corr(megadatacube_all);
jordan@1 398 m = length(al)*(length(al)-1)/2;
jordan@1 399 imagesc(al.*((pval*m)<0.05))
jordan@1 400 al_ = al.*((pval*m)<0.05);
jordan@1 401 al_ = tril(al_ .* (abs(al_)>.5));
jordan@1 402 imagesc(al_)
jordan@1 403 for i=1:length(al_),
jordan@1 404 for j=1:length(al_),
jordan@1 405 if (al_(i,j)~=0) && (i~=j),
jordan@1 406 text(j-.35,i,num2str(al_(i,j),2))
jordan@1 407 end
jordan@1 408 end
jordan@1 409 end
jordan@1 410 [bl pvbl] = corr(megadatacube_all,'type','Kendall'); % Kendall tau version, used by the plots below
jordan@1 411 m = length(bl)*(length(bl)-1)/2;
jordan@1 412 imagesc(bl.*((pvbl*m)<0.05))
jordan@1 413 bl_ = bl.*((pvbl*m)<0.05);
jordan@1 414 bl_ = tril(bl_); % .* (abs(bl_)>.0);
jordan@1 415 imagesc(bl_)
jordan@1 416 for i=1:length(bl_),
jordan@1 417 for j=1:length(bl_),
jordan@1 418 if (bl_(i,j)~=0) && (i~=j),
jordan@1 419 text(j-.35,i,num2str(bl_(i,j),2))
jordan@1 420 end
jordan@1 421 end
jordan@1 422 end
jordan@1 423
jordan@1 424 % Or, we could do this: Take all the computed Kendall taus, i.e., the non-diagonal elements of bl.
jordan@1 425 taus = bl(find(bl<1));
jordan@1 426 taus = taus-mean(taus);
jordan@1 427 taus = taus/std(taus);
jordan@1 428 P = normcdf(-abs(taus));
jordan@1 429 ind = find(P<=0.05);
jordan@1 430 taus = bl(find(bl<1));
jordan@1 431 taus(ind)
jordan@1 432
jordan@1 433 c = colormap;
jordan@1 434 c(32,:) = [1 1 1];
jordan@1 435 c(31,:) = [1 1 1];
jordan@1 436 c = min(1,c*1.6);
jordan@1 437 colormap(c)
jordan@1 438 set(gca,'XTickLabel',[],'XTick',(1:length(al_))-.4)
jordan@1 439 set(gca,'YTickLabel',s([use_these_labels use_these_segs use_these_extras]),'YTick',(1:length(al_)))
jordan@1 440 t = text((1:length(al_))-.3,(length(al_)+1)*ones(1,length(al_))+.3,s([use_these_labels use_these_segs use_these_extras]));
jordan@1 441 set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation',90);
jordan@1 442 axis([0 31 0 31])
jordan@1 443 saveas(gcf,'./plots/all_correlations.jpg')
jordan@1 444
jordan@1 445 s = {'S_o','S_u','pw_f','pw_p','pw_r','rand','bf1','bp1','br1','bf6','bp6','br6','mt2c','mc2t','ds','len','nsa','nla','msla','nspla','nse','nle','msle','nsple','ob','ol','pw_f_x','pw_p_x','pw_r_x','K','asp','acp','I_AE_x','H_EA_x','H_AE_x','S_o_x','S_u_x','rand','mt2c_x','mc2t_x','m','f','d_ae_x','d_ea_x','b_f1_x','b_p1_x','b_r1_x','b_f6_x','b_p6_x','b_r6_x'};
jordan@1 446 s_type = [1,2,3,1,2,3,6,4,5,6,4,5,4,5, 7,7,7,7,7,7,7,7,7,7,7,7,3,1,2,3,2,1,3,1,2,1,2, 3,4,5,5,4,7,7,3,1,2,3,1,2];
jordan@1 447 megadatacube_s(:,38:39,:) = 1 - megadatacube_s(:,38:39,:);
jordan@1 448 megadatacube_s(:,51,:) = 2*megadatacube_s(:,38,:).*megadatacube_s(:,39,:)./(megadatacube_s(:,38,:)+megadatacube_s(:,39,:));
jordan@1 449 % This makes a new 51st metric: the harmonic mean (an F-measure-style combination) of m and f.
jordan@1 450 s_type(51) = 6;
jordan@1 451 s{51} = 'mf';
jordan@1 452
jordan@1 453
jordan@1 454 % [a pval] = corr(median([datacube(lab_measures,:,1) newcube(lab_measures,:,1) extracube(lab_measures,:,1)],3));
jordan@1 455 [a pval] = corr(mean(megadatacube_l,3));
jordan@1 456 m = length(a)*(length(a)-1)/2;
jordan@1 457 imagesc(a.*((pval*m)<0.05))
jordan@1 458 a_ = a.*((pval*m)<0.05);
jordan@1 459 c = colormap;
jordan@1 460 c(32,:) = [1 1 1];
jordan@1 461 colormap(c)
jordan@1 462
jordan@1 463 % I want to make a claim about whether song length correlates with algorithm performance. Let us make sure it holds across all algorithms, and is not just an artefact of taking the median:
jordan@1 464 for j=1:9,
jordan@1 465 a = corr([datacube(lab_measures,:,j) newcube(lab_measures,:,j) extracube(lab_measures,:,j)]);
jordan@1 466 a(16,[17 19 21 23])
jordan@1 467 end
jordan@1 468
jordan@1 469 % BoxPlot of the number of segments in each algorithm output
jordan@1 470 boxplot(reshape(newcube(:,7,:),[length(newcube),9,1]))
jordan@1 471
jordan@1 472 % Look at best 10 and worst 10 songs in each dataset, according to PW_F metric.
jordan@1 473 % Average results across algorithms for this one.
jordan@1 474 unique_algorithms = [3 4 5 6 7];
jordan@1 475 tmp = datacube;
jordan@1 476 tmp(:,:,3) = mean(tmp(:,:,[1:3,9]),3);
jordan@1 477 tmp(:,:,7) = mean(tmp(:,:,7:8),3);
jordan@1 478 tmp = mean(tmp(lab_measures,:,unique_algorithms),3);
jordan@1 479 [tmp1 order] = sortrows(tmp,-3);
jordan@1 480 lab_idx = find(lab_measures); order1 = lab_idx(order); % map rows of the lab-only subset back to original song indices
jordan@1 481 pub_songids = X.mir2pub(order1);
jordan@1 482 values = tmp1((pub_songids>0),3);
jordan@1 483 filenames = {};
jordan@1 484 for i=1:length(pub_songids),
jordan@1 485 if pub_songids(i)>0,
jordan@1 486 filenames{end+1} = public_truth(pub_songids(i)).file;
jordan@1 487 end
jordan@1 488 end
jordan@1 489
jordan@1 490 mirid = pub2mir(336);
jordan@1 491 make_structure_image(mirid, mirex_truth, mirex_output, mirex_dset_origin, X, mirex_results)
jordan@1 492 saveas(gcf,'./plots/MJ_dont_care.jpg')
jordan@1 493 make_structure_image(121, mirex_truth, mirex_output, mirex_dset_origin, X, mirex_results)
jordan@1 494 saveas(gcf,'./plots/play_the_game.jpg')
jordan@1 495
jordan@1 496 % Plot difficulty by album: (the by-album plot is not written yet; the block below breaks results down by SALAMI genre and subgenre instead)
jordan@1 497
jordan@1 498
jordan@1 499 genres = {};
jordan@1 500 subgenres = {};
jordan@1 501 issalami = zeros(length(filenames),1);
jordan@1 502 for i=1:length(filenames),
jordan@1 503 file = filenames{i};
jordan@1 504 if strfind(file,'SALAMI_data'),
jordan@1 505 issalami(i)=1;
jordan@1 506 salami_id = file(79:85);
jordan@1 507 salami_id = salami_id(1:strfind(salami_id,'/')-1);
jordan@1 508 salami_row = find(aaux.metadata{1}==str2num(salami_id));
jordan@1 509 genres{end+1} = cell2mat(aaux.metadata{15}(salami_row));
jordan@1 510 subgenres{end+1} = cell2mat(aaux.metadata{16}(salami_row));
jordan@1 511 end
jordan@1 512 end
jordan@1 513 gs = grp2idx(genres);
jordan@1 514 subgs = grp2idx(subgenres);
jordan@1 515 boxplot(values(find(issalami)),transpose(genres))
jordan@1 516 axis([0.5 5.5 0 1])
jordan@1 517 saveas(gcf,'salami_breakdown.png')
jordan@1 518 cmap = colormap; boxplot(values(find(issalami)),transpose(subgenres),'colors',cmap(round(gs*63/6),:),'orientation','horizontal') % cmap was not defined above; assuming the current colormap is meant
jordan@1 519
jordan@1 520 [tmp1 tmp2] = hist(subgs,max(subgs)-1);
jordan@1 521 tmp1 = find(tmp1>5); % do these subgenres only
jordan@1 522 tmp1 = ismember(subgs,tmp1);
jordan@1 523 tmp2 = find(issalami);
jordan@1 524 boxplot(values(tmp2(tmp1)),transpose(subgenres(tmp1)),'colors',cmap(round(gs(tmp1)*63/6),:),'orientation','horizontal')