Mercurial > hg > camir-aes2014
comparison core/magnatagatune/tests_evals/rbm_subspace/Exp_normalise_deltas.m @ 0:e9a9cd732c1e tip
first hg version after svn
author | wolffd |
---|---|
date | Tue, 10 Feb 2015 15:05:51 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:e9a9cd732c1e |
---|---|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Experiment code template                                                  %
% Project: sub-euclidean distance for music similarity,                     %
% in the last part all the                                                  %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Load features
% Pull the first four variables stored in the feature file, in the order
% reported by whos: raw features, triplet indices, test folds, train folds.
feature_file = 'rel_music_raw_features.mat';
vars  = whos('-file', feature_file);
names = {vars(1:4).name};
A = load(feature_file, names{:});
raw_features = A.(names{1});
indices      = A.(names{2});
tst_inx      = A.(names{3});
trn_inx      = A.(names{4});
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Define directory to save parameters & results
% dir = '/home/funzi/Documents/';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Candidate hyper-parameter grids; a single value from each is selected
% via the index variables below when no grid search is run.
dmr = [0 5 10 20 30 50]; % dimension reduction by PCA (number of trailing PCA columns dropped)
ws = [0 5 10 20 30 50 70]; % window size (0 selects the simple distance below)
% parameters of rbm (if it is used for extraction)
hidNum = [30 50 100 500]; % number of hidden units
lr_1 = [0.05 0.1 0.5];    % learning rate 1
lr_2 = [0.1 0.5 0.7];     % learning rate 2
mmt = [0.02 0.05 0.1];    % momentum
cost = [0.00002 0.01 0.1]; % weight cost

%% Select parameters (if grid-search is not applied)
% Indices into the grids above: di->dmr, wi->ws, hi->hidNum,
% l1i->lr_1, l2i->lr_2, mi->mmt, ci->cost.
di = 1;
wi = 1;
hi = 1;
l1i = 1;
l2i = 1;
mi = 1;
ci = 1;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If grid search is defined
% log_file = strcat(dir,'exp_.mat');
% inx = resume_from_grid(log_file,8);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Feature extraction
% Either project the raw features with PCA (EXT_TYPE 1) or encode them
% with a single RBM layer (EXT_TYPE 2); both paths produce `features`.
EXT_TYPE = 2;
if EXT_TYPE == 1
    % --- PCA path ---
    assert(~exist('OCTAVE_VERSION'),'This script cannot run in octave');
    coeff = princomp(raw_features);
    % Drop the trailing dmr(di) principal components to reduce dimensionality.
    coeff = coeff(:, 1:end - dmr(di));
    features = raw_features * coeff;
    % Min-max normalise each kept dimension; constant dimensions are removed
    % first to avoid division by zero.
    bounds = minmax(features')';
    keep = find(bounds(1,:) ~= bounds(2,:));
    bounds = bounds(:, keep);
    features = features(:, keep);
    nRows = size(features, 1);
    lo = repmat(bounds(1,:), nRows, 1);
    hi_ = repmat(bounds(2,:), nRows, 1);
    features = (features - lo) ./ (hi_ - lo);
elseif EXT_TYPE == 2
    % --- RBM path ---
    conf.hidNum = hidNum(hi);
    conf.eNum = 100;
    conf.sNum = size(raw_features, 1);
    conf.bNum = 1;
    conf.gNum = 1;
    conf.params = [lr_1(l1i) lr_2(l2i) mmt(mi) cost(ci)];
    conf.N = 50;
    conf.MAX_INC = 10;
    W1 = [];  % empty 0x0 weight matrix: training_rbm_ initialises it
    [W1 vB1 hB1] = training_rbm_(conf, W1, raw_features);
    % Hidden-layer pre-activations serve as the extracted features.
    features = raw_features * W1 + repmat(hB1, conf.sNum, 1);
end
67 | |
%% Sub-euclidean computation
% For every cross-validation fold, compute the pairwise distances
% d(a,b) (-> *_12) and d(a,c) (-> *_13) for train and test triplets.
num_case = size(trn_inx, 1);
trnd_12 = cell(1, num_case);
trnd_13 = cell(1, num_case);
tstd_12 = cell(1, num_case);
tstd_13 = cell(1, num_case);

w = ws(wi);  % subspace window size; 0 means plain distance

for i = 1:num_case  % over all cross-validation folds (num_case)
    if w == 0
        % trnd_12 = d(a,b), trnd_13 = d(a,c)
        [trnd_12{i} trnd_13{i}] = simple_dist(trn_inx{i}, features, indices);
        [tstd_12{i} tstd_13{i}] = simple_dist(tst_inx{i}, features, indices);
    else
        % windowed variant (w > 0); final flag 1 enables normalisation,
        % which was reported to work better than no normalisation
        [trnd_12{i} trnd_13{i}] = conv_euclidean_dist(trn_inx{i}, features, indices, w, 1);
        [tstd_12{i} tstd_13{i}] = conv_euclidean_dist(tst_inx{i}, features, indices, w, 1);
    end
end
%% Data preparation
% Build the two hypothesis deltas per fold (dat1 = d(a,c)-d(a,b) and its
% negation dat2) and min-max normalise each over the pooled train+test set.
trn_dat1 = cell(1,num_case);
trn_dat2 = cell(1,num_case);
tst_dat1 = cell(1,num_case);
tst_dat2 = cell(1,num_case);

for i=1:num_case
    %=> Compute hypothesis
    trn_dat1{i} = trnd_13{i} - trnd_12{i};
    trn_dat2{i} = trnd_12{i} - trnd_13{i};
    tst_dat1{i} = tstd_13{i} - tstd_12{i};
    tst_dat2{i} = tstd_12{i} - tstd_13{i};

    % ---
    % Cheat: Normalize over all training and test delta values using min-max
    % Son reports this can give about 95% accuracy
    % ---
    % mm is 2 x nDims after the double transpose: row 1 = per-dimension
    % minimum, row 2 = per-dimension maximum. Constant dimensions
    % (min == max) are dropped to avoid division by zero.
    mm = minmax([trn_dat1{i};tst_dat1{i}]')';
    inn= find(mm(1,:)~=mm(2,:));
    mm = mm(:,inn);
    trn_dat1{i} = (trn_dat1{i}(:,inn)-repmat(mm(1,:),size(trn_dat1{i},1),1))./repmat(mm(2,:)-mm(1,:),size(trn_dat1{i},1),1);
    tst_dat1{i} = (tst_dat1{i}(:,inn)-repmat(mm(1,:),size(tst_dat1{i},1),1))./repmat(mm(2,:)-mm(1,:),size(tst_dat1{i},1),1);

    % BUGFIX: the original omitted the trailing transpose on this minmax
    % call (unlike the dat1 case above), leaving mm as nDims x 2 so that
    % mm(1,:)/mm(2,:) indexed the wrong axis for the dat2 normalisation.
    mm = minmax([trn_dat2{i};tst_dat2{i}]')';
    inn= find(mm(1,:)~=mm(2,:));
    mm = mm(:,inn);
    trn_dat2{i} = (trn_dat2{i}(:,inn)-repmat(mm(1,:),size(trn_dat2{i},1),1))./repmat(mm(2,:)-mm(1,:),size(trn_dat2{i},1),1);
    tst_dat2{i} = (tst_dat2{i}(:,inn)-repmat(mm(1,:),size(tst_dat2{i},1),1))./repmat(mm(2,:)-mm(1,:),size(tst_dat2{i},1),1);
end