annotate compile_datacubes.m @ 6:e2337cd691b1 tip

Finished writing the MATLAB code to replicate all observations made in the article. Added the article to the repository. Renamed the two main scripts ("1-get_mirex_estimates.rb" and "2-generate_smith2013_ismir.m") so they no longer contain dashes (which was a nuisance within MATLAB). Added a new Michael Jackson figure.
author Jordan Smith <jordan.smith@eecs.qmul.ac.uk>
date Wed, 05 Mar 2014 01:02:26 +0000
parents 92b5a46bc67b
children
function [datacube, newcube, extracube, indexing_info] = compile_datacubes(mirex_truth, mirex_dset_origin, public_truth, mirex_output, mirex_results, mir2pub)
% function [datacube, newcube, extracube, indexing_info] = compile_datacubes(mirex_truth, ...
%     mirex_dset_origin, public_truth, mirex_output, mirex_results, mir2pub)
% % % % % % % % % LOAD DATA % % % % % % % % %
%
%
% We have now loaded all the data we could possibly be interested in. (* Almost; see below.)
% But it is not in the easiest form to process, i.e., it is not yet in one big matrix.
% So the idea now is to assemble DATACUBES with all the data we are interested in.
% Each DATACUBE will have three dimensions:
%     DATACUBE(i,j,k)
% will store the performance on song i, according to metric j, by algorithm k.
% The index of the song (i) is its position in the giant matrix MIREX_TRUTH of loaded
% ground truth files published by MIREX.
% The index of the metric (j) is determined by the MIREX spreadsheet. (The fields are
% summarized below.) The matrix itself will not have a 'header' row, so you must
% look at this script to know what is what.
% The index of the algorithm (k) is determined by the order in which the algorithms were loaded.
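% For example, datacube(10,3,2) holds the score of algorithm 2 on song 10 under
% metric 3, which is the pairwise f-measure (see the column list further below).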

% % % % % % % % % % % COLLECT DATACUBES % % % % % % % % % % % % %


% In this script, the 'n' at the beginning of a variable means 'number of'.
n_songs = size(mir2pub,1);
n_metrics = size(mirex_results(1).algo(1).results,2);
n_algos = size(mirex_results(1).algo,2);
datacube = zeros(n_songs, n_metrics, n_algos);

% DATACUBE contains evaluation data published by MIREX:
topelement = 1;
for i = 1:length(mirex_results)
    % Assumes that, within a dataset, every algorithm was run on the same songs.
    n_songs_i = size(mirex_results(i).algo(1).results, 1);
    for j = 1:n_algos
        datacube(topelement:topelement-1+n_songs_i, :, j) = mirex_results(i).algo(j).results;
    end
    topelement = topelement + n_songs_i;
end

fprintf('Collecting new data about annotations......')

% NEWCUBE contains data related to properties of the estimated and annotated descriptions:
newcube = zeros(size(datacube,1), 12, size(datacube,3));
% For each song,
for i = 1:size(datacube,1)
    % Collect information specific to the annotation:
    song_length = mirex_truth(i).tim(end) - mirex_truth(i).tim(1);
    n_segs_ann = length(mirex_truth(i).tim) - 1;           % number of segments in the annotation
    n_labs_ann = length(unique(mirex_truth(i).lab)) - 1;   % number of labels in the annotation
    mean_seg_len_ann = song_length/n_segs_ann;             % average length of segments in the annotation
    n_segs_per_lab_ann = n_segs_ann/n_labs_ann;            % number of segments per label
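    % For example, an annotation spanning 200 seconds with n_segs_ann = 10 and
    % n_labs_ann = 5 gives mean_seg_len_ann = 20 seconds and n_segs_per_lab_ann = 2.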
    % We now want to look up the algorithm output corresponding to this MIREX annotation.
    % Unfortunately, the indexing is tricky. The MIREX annotations are indexed 1 to 1497 (unless
    % you changed the defaults), whereas the MIREX algorithm output is sorted by dataset and
    % re-indexed: e.g., the 298th annotation is actually the 1st song of dataset 2.
    % So we need to do a little archaeology to get I_WRT_DSET, the index with respect to
    % the dataset.
    dset = mirex_dset_origin(i);
    tmp = find(mirex_dset_origin==dset);
    i_wrt_dset = find(tmp==i);
    % You can test that this indexing worked by comparing the file names of the two descriptions:
    %     mirex_truth(i).file
    %     mirex_output(dset).algo(1).song(i_wrt_dset).file
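    % As a toy illustration with hypothetical numbers: if mirex_dset_origin were
    % [1 1 1 2 2] and i = 5, then dset = 2, tmp = [4 5], and i_wrt_dset = 2,
    % i.e., song 5 overall is the 2nd song of dataset 2.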
    for j = 1:n_algos
        % Collect information specific to the estimated description:
        n_segs_est = length(mirex_output(dset).algo(j).song(i_wrt_dset).tim) - 1;
        n_labs_est = length(unique(mirex_output(dset).algo(j).song(i_wrt_dset).lab)) - 1;
        mean_seg_len_est = song_length/n_segs_est;
        n_segs_per_lab_est = n_segs_est/n_labs_est;
        overseg_bound = n_segs_est - n_segs_ann;   % Direct measure of oversegmentation (too many sections) by the estimate
        overseg_label = n_labs_est - n_labs_ann;   % Direct measure of overdiscrimination (too many label types) by the estimate
        % Record all this new data to the NEWCUBE:
        newcube(i,:,j) = [dset, song_length, n_segs_ann, n_labs_ann, mean_seg_len_ann, n_segs_per_lab_ann, ...
            n_segs_est, n_labs_est, mean_seg_len_est, n_segs_per_lab_est, overseg_bound, overseg_label];
    end
end
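% E.g., newcube(i,11,j) is overseg_bound for song i under algorithm j, following
% the column order recorded above (and summarized again further below).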

fprintf('Done!\nConducting new evaluation of MIREX data.......')

% EXTRACUBE contains recalculations of the metrics using our own evaluation package.
% Computing the metrics takes a little while.
extracube = zeros(size(datacube,1), 24, size(datacube,3));
tic
for i = 1:size(datacube,1)
    dset = mirex_dset_origin(i);
    tmp = find(mirex_dset_origin==dset);
    i_wrt_dset = find(tmp==i);
    % Get onsets and labels from the annotation:
    a_onset = mirex_truth(i).tim;
    a_label = mirex_truth(i).lab;
    for j = 1:n_algos
        % Get onsets and labels from the estimated description:
        e_onset = mirex_output(dset).algo(j).song(i_wrt_dset).tim;
        e_label = mirex_output(dset).algo(j).song(i_wrt_dset).lab;
        [~, labres, segres] = compare_structures(e_onset, e_label, a_onset, a_label);
        extracube(i,:,j) = [labres segres];
    end
    % It can be nice to see a progress meter... It took me about 30 seconds to compute
    % 100 songs, and there are ~1500 songs, so expect the full run to take 7 or 8 minutes.
    if mod(i,100)==0
        toc
        fprintf('Getting there. We have done %i out of %i songs so far.\n', i, size(datacube,1))
    end
end
fprintf('Done!\nJust tidying up now.......')
% The metrics can produce NaNs. Get rid of them, please:
extracube(isnan(extracube)) = 0;


% You might think we are done, but we are not! We now create a few handy vectors to remind
% us of what these metrics actually are. There is so much data that it is hard to keep track of it all.
%
% Datacube columns (14, 1-14):
%     "S_o, S_u, pw_f, pw_p, pw_r, rand, bf1, bp1, br1, bf6, bp6, br6, mt2c, mc2t"
% That is:
%     1,2       oversegmentation and undersegmentation scores
%     3,4,5     pairwise f-measure, precision and recall
%     6         Rand index
%     7,8,9     boundary f-measure, precision and recall with a threshold of 0.5 seconds
%     10,11,12  boundary f-measure, precision and recall with a threshold of 3 seconds
%     13,14     median true-to-claim and claim-to-true distances
%
% Newcube columns (12, 15-26):
%     dset, song_length,
%     n_segs_ann, n_labs_ann, mean_seg_len_ann, n_segs_per_lab_ann,
%     n_segs_est, n_labs_est, mean_seg_len_est, n_segs_per_lab_est,
%     overseg_bound, overseg_label
%
% Extracube columns (24, 27-50):
%     vector_lab = [pw_f, pw_p, pw_r, K, asp, acp, I_AE, H_EA, H_AE, S_o, S_u, rand];
%     vector_seg = [mt2c, mc2t, m, f, d_ae, d_ea, b_f1, b_p1, b_r1, b_f6, b_p6, b_r6];
% That is, for the labelling metrics:
%     1,2,3     pairwise f-measure, precision and recall
%     4,5,6     K, average speaker purity, average cluster purity
%     7,8,9     mutual information and both conditional entropies
%     10,11     oversegmentation and undersegmentation scores
%     12        Rand index
% And for the boundary metrics:
%     1,2       median true-to-claim and claim-to-true distances
%     3,4       missed boundaries and fragmentation scores
%     5,6       directional Hamming distance and inverse directional Hamming distance
%     7,8,9     boundary f-measure, precision and recall with a threshold of 0.5 seconds
%     10,11,12  boundary f-measure, precision and recall with a threshold of 3 seconds
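% For example, in the concatenated indexing used below, column 3 holds the
% MIREX-published pairwise f-measure and column 27 (= 14 + 12 + 1) holds our own
% recomputation of it, so the two can be compared directly as a sanity check.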

column_labels = ...
    ... % Datacube:
    {'S_o','S_u','pw_f','pw_p','pw_r','rand','bf1','bp1','br1','bf6','bp6','br6','mt2c','mc2t', ...
    ... % Newcube:
    'ds','len','nsa','nla','msla','nspla','nse','nle','msle','nsple','ob','ol', ...
    ... % Extracube:
    'pw_f','pw_p','pw_r','K','asp','acp','I_AE','H_EA','H_AE','S_o','S_u','rand', ...
    'mt2c','mc2t','m','f','d_ae','d_ea','b_f1','b_p1','b_r1','b_f6','b_p6','b_r6'};

% It is actually nice, for later on, to have these formatted slightly more prettily.
% Also, we would rather retain '1-f' and '1-m' than f and m, so we make this switch now
% (columns 15 and 16 of EXTRACUBE are m and f, respectively):
extracube(:,[15, 16],:) = 1 - extracube(:,[15, 16],:);
column_labels = {'S_O','S_U','pw_f','pw_p','pw_r','Rand','bf_{.5}','bp_{.5}','br_{.5}','bf_3','bp_3','br_3','mt2c','mc2t', ...
    'ds','len','ns_a','nl_a','msl_a','nspl_a','ns_e','nl_e','msl_e','nspl_e','ob','ol', ...
    'pw_f_x','pw_p_x','pw_r_x','K','asp','acp','I_AE_x','H_EA_x','H_AE_x','S_o_x','S_u_x','rand','mt2c_x','mc2t_x', ...
    '1-m','1-f','d_ae_x','d_ea_x','b_f1_x','b_p1_x','b_r1_x','b_f6_x','b_p6_x','b_r6_x'};

% We will manually create lists of indices (with matching labels) for the sets of metrics we want to examine.
% The indices are into MEGACUBE, which is the concatenation of DATACUBE (MIREX evaluation), NEWCUBE
% (extra parameters based on MIREX data), and EXTRACUBE (my computations: K, asp, acp, etc.).
sind_manual1 = [3,6,30,5,1,31,4,2,32];          % pw_f, Rand, K, pw_r, S_O, asp, pw_p, S_U, acp
sind_manual2 = [10,7,11,8,42,14,12,9,41,13];    % bf3, bf.5, bp3, bp.5, 1-f, mc2t, br3, br.5, 1-m, mt2c
sind_manual3 = 16:26;                           % len, ns_a, nl_a, msl_a, nspl_a, ns_e, nl_e, msl_e, nspl_e, ob, ol
indexing_info(1).manual_set = sind_manual1;
indexing_info(2).manual_set = sind_manual2;
indexing_info(3).manual_set = sind_manual3;
for i = 1:3
    indexing_info(i).labels = column_labels(indexing_info(i).manual_set);
    indexing_info(i).all_labels = column_labels;
end
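% A minimal sketch of how these indices are meant to be used. MEGACUBE itself is
% assembled elsewhere; the name and the concatenation below are an assumption based
% on the comment above:
%     megacube = cat(2, datacube, newcube, extracube);
%     label_metrics = megacube(:, indexing_info(1).manual_set, :);
%     label_names   = indexing_info(1).labels;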

% Running this script is time-consuming and tedious. Once again, we save our work.
save('./datacubes','datacube','newcube','extracube','indexing_info')

fprintf('Finished! Goodbye.\n')