comparison trunk/experiments/scripts/cnbh-syllables/results_plotting/gen_results.py @ 381:e6f006238034
- Python results plotting (finally).
- Proper results reporting script.
- Test on ALL talkers. The results script then generates a summary based on all the various subsets.
- Fixed chown users (hopefully the sudos can be deleted entirely soon)
- More...
author | tomwalters
---|---
date | Mon, 13 Sep 2010 18:34:23 +0000
parents |
children | 068e33a65f8d
comparison: 380:0780b552b4bf → 381:e6f006238034
#!/usr/bin/env python
# encoding: utf-8
"""
gen_results.py

Created by Thomas Walters on 2010-09-12.
"""

import sys
import getopt
import re

help_message = '''
Generate a file containing all the results for a run of the
syllable recognition experiment. Expected input is a 'misclassifications'
file of the type generated by run_test_instance.sh, along with the locations
of files containing the training talkers and all the talkers that the system
was tested on.

Arguments:
-i --input_file
-t --train_talkers
-s --test_talkers
-o --output_filename
-c --value_count
-p --spoke_pattern
'''

class Usage(Exception):
  def __init__(self, msg):
    self.msg = msg


def main(argv=None):
  if argv is None:
    argv = sys.argv
  try:
    try:
      opts, args = getopt.getopt(argv[1:], "hi:t:s:o:c:p:v",
                                 ["help", "input_file=", "train_talkers=",
                                  "test_talkers=", "output_filename=",
                                  "value_count=", "spoke_pattern="])
    except getopt.error, msg:
      raise Usage(msg)

    # defaults
    input_file = "misclassified_syllables_iteration_15"
    train_talkers = "training_talkers"
    test_talkers = "testing_talkers"
    output_filename = "results.txt"
    total_value_count = 185
    spoke_pattern_file = "spoke_pattern.txt"

    # option processing
    for option, value in opts:
      if option == "-v":
        verbose = True
      if option in ("-h", "--help"):
        raise Usage(help_message)
      if option in ("-i", "--input_file"):
        input_file = value
      if option in ("-t", "--train_talkers"):
        train_talkers = value
      if option in ("-s", "--test_talkers"):
        test_talkers = value
      if option in ("-o", "--output_filename"):
        output_filename = value
      if option in ("-c", "--value_count"):
        total_value_count = int(value)
      if option in ("-p", "--spoke_pattern"):
        spoke_pattern_file = value

  except Usage, err:
    print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
    print >> sys.stderr, "\t for help use --help"
    return 2

  # Each line of the misclassifications file holds a count followed by a
  # talker name; convert the count into a percent-correct score.
  results = dict()
  f = open(input_file, 'r')
  for line in f:
    values = line.strip().split(' ')
    results[values[1]] = 100 * (1 - float(values[0]) / total_value_count)
  f.close()

  f = open(test_talkers, 'r')
  test_talkers_list = f.readlines()
  f.close()

  f = open(train_talkers, 'r')
  train_talkers_list = f.readlines()
  f.close()

  # The first entry on the first line of the spoke pattern file is the
  # central talker; each subsequent line lists the talkers along one spoke.
  spoke_pattern = []
  f = open(spoke_pattern_file, 'r')
  for line in f:
    spoke_pattern.append(line.strip().split(' '))
  f.close()

  all_talkers_list = []
  all_talkers_list.extend(train_talkers_list)
  all_talkers_list.extend(test_talkers_list)

  # Here I make the rather rash assumption that the model was tested on all
  # talkers; this should be true if the training and testing was done using
  # my scripts.
  for t in all_talkers_list:
    results.setdefault(t.strip(), 100.0)

  total_score = 0.0
  element_count = 0
  for t in train_talkers_list:
    total_score += results[t.strip()]
    element_count += 1
  score = total_score / element_count
  print ("# Score on training talkers: %.1f%%" % score)

  total_score = 0.0
  element_count = 0
  for t in all_talkers_list:
    total_score += results[t.strip()]
    element_count += 1
  score = total_score / element_count
  print ("# Score on all talkers: %.1f%%" % score)

  total_score = 0.0
  element_count = 0
  for t in test_talkers_list:
    total_score += results[t.strip()]
    element_count += 1
  score = total_score / element_count
  print ("# Score on test talkers: %.1f%%" % score)

  score = results[spoke_pattern[0][0]]
  print ("# Score on central talker: %.1f%%" % score)

  for s in xrange(1, 9):
    print ("# Results for spoke %d" % s)
    for p in xrange(0, 7):
      score = results[spoke_pattern[s][p]]
      m = re.match('(.*)p(.*)s', spoke_pattern[s][p])
      gpr = m.group(1)
      vtl = m.group(2)
      print ("%s %s %s" % (gpr, vtl, score))

if __name__ == "__main__":
  sys.exit(main())
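
For reference, below is a minimal standalone sketch (not part of this changeset) of the two per-talker conversions the script performs: turning a misclassification count into a percent-correct score, and splitting a talker name into its GPR and VTL components with the same regular expression. The talker name and count used here are hypothetical example values.

# Minimal sketch, not from the repository: illustrates the score arithmetic
# and the talker-name parsing used in gen_results.py. The talker name
# "137.5p108.2s" and the count of 12 misclassifications are hypothetical,
# assuming names encode a pitch (GPR) value before 'p' and a vocal-tract
# length (VTL) value before 's', as the script's regex expects.
import re

total_value_count = 185   # syllables per talker (the script's default)
misclassified = 12        # hypothetical count read from the input file
talker = "137.5p108.2s"   # hypothetical talker name

# Percent-correct score, as computed when reading the misclassifications file.
score = 100 * (1 - float(misclassified) / total_value_count)

# GPR/VTL extraction, as in the per-spoke reporting loop.
m = re.match('(.*)p(.*)s', talker)
gpr = m.group(1)
vtl = m.group(2)

print("%s %s %.1f" % (gpr, vtl, score))   # prints: 137.5 108.2 93.5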