webaudioevaluationtool — changeset 3005:cf4bf84c3e7b

summary:   Merge branch 'vnext' into Dev_main
author:    Nicholas Jillings <n.g.r.jillings@se14.qmul.ac.uk>
date:      Thu, 03 Aug 2017 14:51:18 +0200
parents:   15f266931a2d (diff), f57581f20068 (current diff)
children:  50f5e11a38b7
diffstat:  9 files changed, 293 insertions(+), 32 deletions(-)
--- a/.gitignore	Thu Aug 03 14:51:01 2017 +0200
+++ b/.gitignore	Thu Aug 03 14:51:18 2017 +0200
@@ -11,3 +11,6 @@
 *.DS_STORE
 *.swp
 *.swo
+saves/ratings/*
+saves/timelines/*
+saves/timelines_movement/*
--- a/interfaces/ordinal.js	Thu Aug 03 14:51:01 2017 +0200
+++ b/interfaces/ordinal.js	Thu Aug 03 14:51:18 2017 +0200
@@ -222,6 +222,7 @@
         root.addEventListener('dragleave', this, true);
         root.addEventListener('drop', this, true);
         root.addEventListener('dragend', this, true);
+        this.dragging = false;
         this.handleEvent = function (event) {
             if (event.type == "click") {
                 if (playing === false) {
@@ -252,6 +253,7 @@
 
         e.dataTransfer.effectAllowed = 'move';
         e.dataTransfer.setData('text/plain', audioObject.id);
+        this.dragging = "true";
     }
 
     function dragEnter(e) {
@@ -267,13 +269,10 @@
         if (e.preventDefault) {
             e.preventDefault(); // Necessary. Allows us to drop.
         }
-        e.dataTransfer.dropEffect = 'move'; // See the section on the DataTransfer object.
-
-        var srcid = Number(e.dataTransfer.getData("text/plain"));
         var elements = container.childNodes;
         var srcObject = audioEngineContext.audioObjects.find(function (ao) {
-            return ao.id === srcid;
+            return ao.interfaceDOM.dragging;
         });
         var src = srcObject.interfaceDOM.root;
         if (src !== root) {
@@ -293,8 +292,6 @@
             }
         }
-
-        return false;
     }
 
     function drop(e) {
@@ -318,6 +315,7 @@
         // this/e.target is the source node.
         $(".ordinal-element").removeClass("dragging");
         $(".ordinal-element").removeClass("over");
+        this.dragging = false;
     }
 
     this.getElementPosition = function () {
--- a/js/core.js	Thu Aug 03 14:51:01 2017 +0200
+++ b/js/core.js	Thu Aug 03 14:51:18 2017 +0200
@@ -3575,7 +3575,7 @@
                 returnURL = specification.projectReturn;
             }
         }
-        xmlhttp.open("POST", returnURL + "php/save.php?key=" + this.key + "&saveFilenamePrefix=" + this.parent.filenamePrefix);
+        xmlhttp.open("POST", returnURL + "php/save.php?key=" + this.key + "&saveFilenamePrefix=" + this.parent.filenamePrefix + "&state=update");
         xmlhttp.setRequestHeader('Content-Type', 'text/xml');
         xmlhttp.onerror = function () {
             console.log('Error updating file to server!');
@@ -3613,6 +3613,7 @@
         } else {
             saveURL += this.parent.filenamePrefix;
         }
+        saveURL += "&state=finish";
         return new Promise(function (resolve, reject) {
             var xmlhttp = new XMLHttpRequest();
             xmlhttp.open("POST", saveURL);
--- a/php/save.php	Thu Aug 03 14:51:01 2017 +0200
+++ b/php/save.php	Thu Aug 03 14:51:18 2017 +0200
@@ -33,7 +33,17 @@
 }
 $postText = file_get_contents('php://input');
 $file_key = $_GET['key'];
-$filename = '../saves/'.$saveFilenamePrefix.$file_key.".xml";
+
+$update = false;
+if (isset($_GET["update"])) {
+    $update = $_GET["update"] == "update";
+}
+
+if ($update) {
+    $filename = '../saves/update-'.$saveFilenamePrefix.$file_key.".xml";
+} else {
+    $filename = '../saves/'.$saveFilenamePrefix.$file_key.".xml";
+}
 
 if (!file_exists($filename)) {
     die('<response state="error"><message>Could not find save</message></response>');
@@ -132,4 +142,8 @@
 // Return XML confirmation data
 $xml = '<response state="OK"><message>OK</message><file bytes="'.$wbytes.'">"'.$filename.'"</file></response>';
 echo $xml;
+
+if (!$update) {
+    unlink('../saves/update-'.$saveFilenamePrefix.$file_key.".xml");
+}
 ?>
--- a/python/generate_report.py	Thu Aug 03 14:51:01 2017 +0200
+++ b/python/generate_report.py	Thu Aug 03 14:51:18 2017 +0200
@@ -227,17 +227,18 @@
         # number of comments (interesting if comments not mandatory)
         for audioelement in audioelements:
-            response = audioelement.find("./comment/response")
-            was_played = audioelement.find("./metric/metricresult/[@name='elementFlagListenedTo']")
-            was_moved = audioelement.find("./metric/metricresult/[@name='elementFlagMoved']")
-            if response is not None and response.text is not None and len(response.text) > 1:
-                number_of_comments += 1
-            else:
-                number_of_missing_comments += 1
-            if was_played is not None and was_played.text == 'false':
-                not_played.append(audioelement.get('name'))
-            if was_moved is not None and was_moved.text == 'false':
-                not_moved.append(audioelement.get('name'))
+            if audioelement.get("type") != "outside-reference":
+                response = audioelement.find("./comment/response")
+                was_played = audioelement.find("./metric/metricresult/[@name='elementFlagListenedTo']")
+                was_moved = audioelement.find("./metric/metricresult/[@name='elementFlagMoved']")
+                if response is not None and response.text is not None and len(response.text) > 1:
+                    number_of_comments += 1
+                else:
+                    number_of_missing_comments += 1
+                if was_played is not None and was_played.text == 'false':
+                    not_played.append(audioelement.get('name'))
+                if was_moved is not None and was_moved.text == 'false':
+                    not_moved.append(audioelement.get('name'))
 
         # update global counters
         total_empty_comments += number_of_missing_comments
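Note on the generate_report.py change: the new guard skips outside-reference fragments when counting comments and the listened/moved flags, since those elements are never rated. A minimal sketch of the same ElementTree filter, using a made-up two-element results snippet (the XML literal and fragment names are illustrative, not taken from the repository):

import xml.etree.ElementTree as ET

# Hypothetical fragment of a results file: one rated element and one outside-reference.
sample = """
<page ref="demo">
  <audioelement name="frag-A"><comment><response>Too bright</response></comment></audioelement>
  <audioelement name="ref" type="outside-reference"><comment><response/></comment></audioelement>
</page>
"""

page = ET.fromstring(sample)
comments = 0
for audioelement in page.findall("./audioelement"):
    # Same filter as the patch: outside-reference elements carry no rating, so skip them.
    if audioelement.get("type") == "outside-reference":
        continue
    response = audioelement.find("./comment/response")
    if response is not None and response.text is not None and len(response.text) > 1:
        comments += 1

print(comments)  # -> 1: only frag-A is counted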
--- a/python/pythonServer.py	Thu Aug 03 14:51:01 2017 +0200
+++ b/python/pythonServer.py	Thu Aug 03 14:51:18 2017 +0200
@@ -138,12 +138,15 @@
         global curSaveIndex
         options = self.path.rsplit('?')
         options = options[1].rsplit('&')
+        update = False
         for option in options:
             optionPair = option.rsplit('=')
             if optionPair[0] == "key":
                 key = optionPair[1]
             elif optionPair[0] == "saveFilenamePrefix":
                 prefix = optionPair[1]
+            elif optionPair[0] == "state":
+                update = optionPair[1] == "update"
         if key == None:
             self.send_response(404)
             return
@@ -153,6 +156,8 @@
         postVars = self.rfile.read(varLen)
         print("Saving file key "+key)
         filename = prefix+'-'+key+'.xml'
+        if update:
+            filename = "update-"+filename
         file = open('../saves/'+filename,'wb')
         file.write(postVars)
         file.close()
@@ -173,6 +178,8 @@
         self.wfile.write(bytes(reply, "utf-8"))
         curSaveIndex += 1
         curFileName = 'test-'+str(curSaveIndex)+'.xml'
+        if update == False:
+            os.remove("../saves/update-"+filename)
 
 def poolXML(s):
     pool = ET.parse('../tests/pool.xml')
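Note on the save-path changes: together with the core.js edit, intermediate saves now carry state=update and are written to saves/update-<prefix>-<key>.xml, while the closing save drops the update state and deletes that temporary copy. A rough client-side sketch of the two-stage flow against the Python development server, assuming it answers the same php/save.php path that core.js posts to; the key, prefix, and XML payloads below are illustrative, not the tool's real result schema:

import urllib.request

BASE = "http://localhost:8000/php/save.php"  # assumed dev-server route; core.js posts to php/save.php
KEY = "abc123"                               # illustrative session key
PREFIX = "test"                              # illustrative saveFilenamePrefix

def post_xml(url, xml_text):
    # POST an XML body with the same Content-Type header core.js sets.
    req = urllib.request.Request(url, data=xml_text.encode("utf-8"),
                                 headers={"Content-Type": "text/xml"})
    with urllib.request.urlopen(req) as resp:
        return resp.read().decode("utf-8")

partial = "<result key='%s'><page ref='p1' state='empty'/></result>" % KEY
final = "<result key='%s'><page ref='p1' state='complete'/></result>" % KEY

# Intermediate save: state=update -> server writes saves/update-test-abc123.xml
post_xml(BASE + "?key=%s&saveFilenamePrefix=%s&state=update" % (KEY, PREFIX), partial)

# Final save: any other state -> server writes saves/test-abc123.xml and removes the update- copy
post_xml(BASE + "?key=%s&saveFilenamePrefix=%s&state=finish" % (KEY, PREFIX), final)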
--- a/python/score_parser.py	Thu Aug 03 14:51:01 2017 +0200
+++ b/python/score_parser.py	Thu Aug 03 14:51:18 2017 +0200
@@ -58,17 +58,16 @@
 
         # Check if page in the store
         if storage.get(page_name) == None:
-            storage[page_name] = {'header':[], 'axis':{}} # add to the store
+            storage[page_name] = {'header':[], 'axis':{"default": {}}} # add to the store
         # Get the axis names
         pageConfig = root.find('./waet/page/[@id="'+page_name+'"]')
         for interface in pageConfig.findall('./interface'): # Get the <interface> noeds
             interfaceName = interface.get("name"); # Get the axis name
-            if interfaceName == None:
+            if interfaceName == None or interfaceName == "null":
                 interfaceName = "default" # If name not set, make name 'default'
-            if storage[page_name]['axis'].get(interfaceName) == None:
+            if interfaceName not in storage[page_name]['axis'].keys():
                 storage[page_name]['axis'][interfaceName] = {} # If not in store for page, add empty dict
-            storage[page_name]['axis'][interfaceName][subject_id] = [] # Add the store for the session
 
         # header: fragment IDs in 'alphabetical' order
         # go to fragment column, or create new column if it doesn't exist yet
@@ -76,7 +75,8 @@
         # get alphabetical array of fragment IDs from this subject's XML
         fragmentnamelist = [] # make empty list
         for audioelement in page.findall("./audioelement"): # iterate over all audioelements
-            fragmentnamelist.append(audioelement.get('ref')) # add to list
+            if audioelement.get("type") != "outside-reference":
+                fragmentnamelist.append(audioelement.get('ref')) # add to list
         fragmentnamelist = sorted(fragmentnamelist); # Sort the list
         storage[page_name]['header'] = fragmentnamelist;
@@ -87,11 +87,17 @@
             axisName = value.get('interface-name')
             if axisName == None or axisName == "null":
                 axisName = 'default'
+            print(storage[page_name]['axis'])
             axisStore = storage[page_name]['axis'][axisName]
+            try:
+                subjectStore = axisStore[subject_id]
+            except KeyError:
+                axisStore[subject_id] = []
+                subjectStore = axisStore[subject_id]
             if hasattr(value, 'text'):
-                axisStore[subject_id].append(value.text)
+                subjectStore.append(value.text)
             else:
-                axisStore[subject_id].append('')
+                subjectStore.append('')
 
 # Now create the individual files
 for page_name in storage:
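Note on the score_parser.py change: the try/except now creates each subject's list lazily, the first time a rating is stored for that axis, instead of unconditionally when the page is first seen. The same lazy-creation pattern can be written more compactly with dict.setdefault; the variable names here are illustrative stand-ins for the script's own storage:

# Equivalent lazy-creation pattern: one call instead of try/except KeyError.
axis_store = {}           # e.g. storage[page_name]['axis'][axisName] in the script
subject_id = "subject-1"  # illustrative session key

subject_store = axis_store.setdefault(subject_id, [])  # creates [] only on first access
subject_store.append("42.0")

print(axis_store)  # {'subject-1': ['42.0']}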
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/survey_parser.py	Thu Aug 03 14:51:18 2017 +0200
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+import xml.etree.ElementTree as ET
+import os
+import sys
+import csv
+import matplotlib.pyplot as plt
+import numpy as np
+import scipy as sp
+import scipy.stats
+
+
+# COMMAND LINE ARGUMENTS
+
+assert len(sys.argv)<3, "score_parser takes at most 1 command line argument\n"+\
+                        "Use: python score_parser.py [rating_folder_location]"
+
+# XML results files location
+if len(sys.argv) == 1:
+    folder_name = "../saves"    # Looks in 'saves/' folder from 'scripts/' folder
+    print("Use: python score_parser.py [rating_folder_location]")
+    print("Using default path: " + folder_name)
+elif len(sys.argv) == 2:
+    folder_name = sys.argv[1]   # First command line argument is folder
+
+# check if folder_name exists
+if not os.path.exists(folder_name):
+    #the file is not there
+    print("Folder '"+folder_name+"' does not exist.")
+    sys.exit() # terminate script execution
+elif not os.access(os.path.dirname(folder_name), os.W_OK):
+    #the file does exist but write privileges are not given
+    print("No write privileges in folder '"+folder_name+"'.")
+
+# CODE
+
+storage = {"globals":{}, "pages": {}}
+
+def decodeSurveyTree(session_id, surveyroot, store):
+    # Get all the childs
+    for survey_entry in list(surveyroot):
+        survey_id = survey_entry.get("ref")
+        if survey_id not in store.keys():
+            store[survey_id] = {"responses": []}
+        survey_type = survey_entry.get("type")
+        store[survey_id]["type"] = survey_type
+        if survey_type == "statement" or survey_type == "video":
+            if "header" not in store[survey_id]:
+                store[survey_id]["header"] = ("ids", "duration")
+            store[survey_id] = decodeSurveyStatement(session_id, survey_entry, store[survey_id])
+        elif survey_type == "question" or survey_type == "number" or survey_type == "slider":
+            if "header" not in store[survey_id]:
+                store[survey_id]["header"] = ("ids", "durations", "response")
+            store[survey_id] = decodeSurveyQuestion(session_id, survey_entry, store[survey_id])
+        elif survey_type == "checkbox":
+            if "header" not in store[survey_id]:
+                head = ["ids", "duration"]
+                for option in survey_entry.findall("./response"):
+                    head.append(option.get("name"))
+                store[survey_id]["header"] = tuple(head)
+            store[survey_id] = decodeSurveyCheckbox(session_id, survey_entry, store[survey_id])
+        elif survey_type == "radio":
+            if "header" not in store[survey_id]:
+                store[survey_id]["header"] = ("ids", "duration", "response")
+            store[survey_id] = decodeSurveyRadio(session_id, survey_entry, store[survey_id])
+    return store
+
+def decodeSurveyStatement(session_id, survey_entry, store):
+    resp = (session_id, survey_entry.get("duration"))
+    store["responses"].append(resp)
+    return store
+
+def decodeSurveyQuestion(session_id, survey_entry, store):
+    resp = (session_id, survey_entry.get("duration"), survey_entry.find("./response").text)
+    store["responses"].append(resp)
+    return store
+
+def decodeSurveyCheckbox(session_id, survey_entry, store):
+    response = [session_id, survey_entry.get("duration")]
+    for node in survey_entry.findall("./response"):
+        response.append(node.get("checked"))
+    store["responses"].append(tuple(response))
+    return store
+
+def decodeSurveyRadio(session_id, survey_entry, store):
+    response = (session_id, survey_entry.get("duration"), survey_entry.find("./response").get("name"))
+    store["responses"].append(response)
+    return store
+
+if folder_name.endswith("/") is False:
+    folder_name += "/"
+
+# Create the folder 'surveys' if not yet created
+if not os.path.exists(folder_name + 'surveys'):
+    os.makedirs(folder_name + 'surveys')
+
+#Iterate through every XML file in folder_name
+for file_name in os.listdir(folder_name):
+    if file_name.endswith(".xml"):
+        tree = ET.parse(folder_name + file_name)
+        root = tree.getroot()
+        subject_id = root.get('key')
+        pre_survey = root.find("./survey[@location='pre']")
+        if len(pre_survey) is not 0:
+            if "pre" not in storage["globals"].keys():
+                storage["globals"]["pre"] = {}
+            storage["globals"]["pre"] = decodeSurveyTree(subject_id, pre_survey, storage["globals"]["pre"])
+        post_survey = root.find("./survey[@location='post']")
+        if len(post_survey) is not 0:
+            if "post" not in storage["globals"].keys():
+                storage["globals"]["post"] = {}
+            storage["globals"]["post"] = decodeSurveyTree(subject_id, post_survey, storage["globals"]["post"])
+
+        # Now iterate through the page specifics
+        for page in root.findall("./page[@state='complete']"):
+            page_name = page.get("ref")
+            pre_survey = page.find("./survey[@location='pre']")
+            try:
+                page_store = storage["pages"][page_name]
+            except KeyError:
+                storage["pages"][page_name] = {}
+                page_store = storage["pages"][page_name]
+            if len(pre_survey) is not 0:
+                if "pre" not in page_store.keys():
+                    page_store["pre"] = {}
+                page_store["pre"] = decodeSurveyTree(subject_id, pre_survey, page_store["pre"])
+            post_survey = page.find("./survey[@location='post']")
+            if len(post_survey) is not 0:
+                if "post" not in page_store.keys():
+                    page_store["post"] = {}
+                page_store["post"] = decodeSurveyTree(subject_id, post_survey, page_store["post"])
+
+#Storage now holds entire survey structure
+# Time to start exporting to files
+
+# Store globals
+file_store_root = folder_name + 'surveys/'
+for position in storage["globals"].keys():
+    for ref in storage["globals"][position].keys():
+        with open(file_store_root+ref+".csv", "w") as f:
+            filewriter = csv.writer(f, delimiter=",")
+            filewriter.writerow(storage["globals"][position][ref]["header"])
+            for row in storage["globals"][position][ref]["responses"]:
+                filewriter.writerow(row)
+for page_name in storage["pages"].keys():
+    for position in storage["pages"][page_name].keys():
+        if not os.path.exists(file_store_root + page_name):
+            os.makedirs(file_store_root + page_name)
+        for ref in storage["pages"][page_name][position].keys():
+            with open(file_store_root+page_name+"/"+ref+".csv", "w") as f:
+                filewriter = csv.writer(f, delimiter=",")
+                filewriter.writerow(storage["pages"][page_name][position][ref]["header"])
+                for row in storage["pages"][page_name][position][ref]["responses"]:
+                    filewriter.writerow(row)
+
+#Time to plot
+
+def plotDurationHistogram(store, plot_id, saveloc):
+    x = []
+    for row in store["responses"]:
+        x.append(float(row[1]))
+    x = np.asarray(x)
+    plt.figure()
+    n, bins, patches = plt.hist(x, 10, facecolor='green', alpha=0.75)
+    plt.xlabel("Duration")
+    plt.ylabel("Count")
+    plt.grid(True)
+    plt.title("Histogram of durations for "+plot_id)
+    plt.savefig(saveloc+plot_id+"-duration.pdf", bbox_inches='tight')
+
+def plotRadio(store, plot_id, saveloc):
+    plt.figure()
+    data = {}
+    for row in store["responses"]:
+        try:
+            data[row[2]] += 1
+        except KeyError:
+            data[row[2]] = 1
+    labels = data.keys()
+    sizes = data.values()
+    plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
+    plt.title("Selections of "+plot_id)
+    plt.savefig(saveloc+plot_id+".pdf", bbox_inches='tight')
+
+def plotCheckbox(store, plot_id, saveloc):
+    data = []
+    labels = []
+    for h in store["header"][2::1]:
+        labels.append(h)
+        data.append(0)
+    for row in store["responses"]:
+        for i in range(2, len(labels)+2):
+            if row[i] == "true":
+                data[i-2] += 1
+    x = scipy.arange(4)
+    plt.figure()
+    plt.bar(x, data, width=0.8)
+    plt.xticks(x+0.4, labels)
+    plt.xlabel("Option")
+    plt.ylabel("Count")
+    plt.title("Selection counts of "+plot_id)
+    plt.savefig(saveloc+plot_id+".pdf", bbox_inches='tight')
+
+for page_name in storage["pages"].keys():
+    for position in storage["pages"][page_name].keys():
+        saveloc = file_store_root+page_name+"/"
+        for ref in storage["pages"][page_name][position].keys():
+            plotDurationHistogram(storage["pages"][page_name][position][ref], ref, saveloc)
+            if storage["pages"][page_name][position][ref]["type"] == "radio":
+                plotRadio(storage["pages"][page_name][position][ref], ref, saveloc)
+            if storage["pages"][page_name][position][ref]["type"] == "checkbox":
+                plotCheckbox(storage["pages"][page_name][position][ref], ref, saveloc)
\ No newline at end of file
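Note on the new survey_parser.py: it walks every results file in the saves folder, writes one CSV per survey question under <saves>/surveys/ (with per-page subfolders), and then plots duration histograms plus pie/bar charts for radio and checkbox questions. A hedged usage sketch, assuming results live in the default ../saves folder and that a survey question with ref 'gender' exists; both are illustrative:

# Run from the python/ folder (same convention as the other parsers):
#   python survey_parser.py              # reads ../saves by default
#   python survey_parser.py /path/saves  # or point it at another folder
#
# Then inspect one of the generated per-question CSVs.
import csv

# 'gender' is a hypothetical survey ref; substitute one from your own test specification.
with open("../saves/surveys/gender.csv") as f:
    for row in csv.reader(f):
        print(row)  # header first (e.g. ['ids', 'duration', 'response']), then one row per session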
--- a/python/timeline_view_movement.py	Thu Aug 03 14:51:01 2017 +0200
+++ b/python/timeline_view_movement.py	Thu Aug 03 14:51:18 2017 +0200
@@ -74,6 +74,10 @@
             if page_name is None: # ignore 'empty' audio_holders
                 print("Skipping empty page name from "+subject_id+".")
                 break
+
+            if page.get("state") != "complete":
+                print("Skipping non-completed page "+page_name+" from "+subject_id+".")
+                break
 
             # subtract total page length from subsequent page event times
             page_time_temp = page.find("./metric/metricresult/[@id='testTime']")
@@ -108,11 +112,20 @@
                 if audioelement is not None: # Check it exists
                     audio_id = str(audioelement.get('ref'))
-                    # break if no initial position or move events registered
+                    # break if outside-reference
+                    if audioelement.get("type") == "outside-reference":
+                        break;
+
+                    # break if no initial position....
                     initial_position_temp = audioelement.find("./metric/metricresult/[@name='elementInitialPosition']")
                     if initial_position_temp is None:
                         print("Skipping "+page_name+" from "+subject_id+": does not have initial positions specified.")
                         break
+                    # ... or move events registered
+                    movements = audioelement.find("./metric/metricresult[@name='elementTrackerFull']")
+                    if movements is None:
+                        print("Skipping "+page_name+" from "+subject_id+": does not have trackers.")
+                        break
 
                     # get move events, initial and eventual position
                     initial_position = float(initial_position_temp.text)
@@ -299,13 +312,20 @@
     interfaces = page_setup.findall("./interface")
     interface_title = interfaces[0].find("./title")
     scales = interfaces[0].findall("./scales") # get first interface by default
-    scalelabels = scales[0].findall("./scalelabel") # get first scale by default
-
+
     labelpos = [] # array of scalelabel positions
     labelstr = [] # array of strings at labels
-    for scalelabel in scalelabels:
-        labelpos.append(float(scalelabel.get('position'))/100.0)
-        labelstr.append(scalelabel.text)
+
+    # No scales given. Use normal floats
+    if len(scales) is 0:
+        labelpos = [0.0, 1.0]
+        labelstr = ["0", "100"]
+    else:
+        scalelabels = scales[0].findall("./scalelabel") # get first scale by default
+
+        for scalelabel in scalelabels:
+            labelpos.append(float(scalelabel.get('position'))/100.0)
+            labelstr.append(scalelabel.text)
 
     # use interface name as Y axis label
     if interface_title is not None:
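Note on the timeline_view_movement.py change: when a page's interface defines no <scales> node, the script now falls back to a plain 0–100 scale at normalised positions 0.0 and 1.0 instead of failing on scales[0]. A small sketch of how such label positions map onto matplotlib axis ticks, assuming the same normalised 0–1 plotting range the script uses; the figure setup and axis label are illustrative:

import matplotlib.pyplot as plt

# Defaults used when no <scales> node is present, as in the patch.
labelpos = [0.0, 1.0]
labelstr = ["0", "100"]

fig, ax = plt.subplots()
ax.set_ylim(0.0, 1.0)         # marker positions are normalised to 0..1
ax.set_yticks(labelpos)       # one tick per scalelabel position
ax.set_yticklabels(labelstr)  # label text, or the 0/100 fallback
ax.set_ylabel("Rating")       # illustrative; the script uses the interface title when present
plt.savefig("timeline_example.pdf", bbox_inches="tight")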