annotate docs/Instructions/Instructions.tex @ 1163:fb062819d956

Updated the Instructions to match recent specification changes. wip.
author Nicholas Jillings <n.g.r.jillings@se14.qmul.ac.uk>
date Tue, 26 Jan 2016 18:03:03 +0000
parents 3edcbbea168b
children 69860305ac9e
rev   line source
n@1118 1 \documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
n@1118 2 \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
n@1118 3 \geometry{letterpaper} % ... or a4paper or a5paper or ...
n@1118 4 %\geometry{landscape} % Activate for rotated page geometry
n@1118 5 \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
n@1118 6 \usepackage{graphicx} % Use pdf, png, jpg, or eps with pdflatex; use eps in DVI mode
n@1118 7 % TeX will automatically convert eps --> pdf in pdflatex
n@1118 8
n@1118 9 \usepackage{listings} % Source code
n@1118 10 \usepackage{xcolor} % colour (source code for instance)
n@1118 11 \definecolor{grey}{rgb}{0.1,0.1,0.1}
n@1118 12 \definecolor{darkblue}{rgb}{0.0,0.0,0.6}
n@1118 13 \definecolor{cyan}{rgb}{0.0,0.6,0.6}
n@1118 14
n@1118 15 \usepackage{amssymb}
n@1118 16 \usepackage{cite}
n@1118 17 \usepackage{hyperref} % Hyperlinks
n@1118 18 \usepackage[nottoc,numbib]{tocbibind} % 'References' in TOC
n@1118 19
n@1118 20 \graphicspath{{img/}} % Relative path where the images are stored.
n@1118 21
n@1118 22 \title{Instructions for \\ Web Audio Evaluation Tool}
n@1118 23 \author{Nicholas Jillings, Brecht De Man and David Moffat}
n@1118 24 \date{7 December 2015} % Activate to display a given date or no date
n@1118 25
n@1118 26 \begin{document}
n@1118 27 \maketitle
n@1118 28
n@1118 29 These instructions are about use of the Web Audio Evaluation Tool on Windows and Mac OS X platforms.
n@1118 30
n@1118 31 We request that you acknowledge the authors and cite our work when using it \cite{waet}, see also CITING.txt.
n@1118 32
n@1118 33 The tool is available in its entirety including source code on \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/}, under the GNU General Public License v3.0 (\url{http://choosealicense.com/licenses/gpl-3.0/}), see also LICENSE.txt.
n@1118 34
n@1118 35 % TO DO: Linux (Android, iOS)
n@1118 36
n@1118 37 \tableofcontents
n@1118 38
n@1118 39 \clearpage
n@1118 40
n@1118 41 \section{Installation}
n@1118 42 Download the folder (\url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool/archive/tip.zip}) and unzip in a location of your choice, or pull the source code from \url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool} (Mercurial).
n@1118 43
n@1118 44 \subsection{Contents}
n@1118 45 The folder should contain the following elements: \\
n@1118 46
n@1118 47 \textbf{Main folder:}
n@1118 48 \begin{itemize}
n@1118 49 \item \texttt{analyse.html}: analysis and diagnostics of a set of result XML files
n@1163 50 \item \texttt{core.css, graphics.css, structure.css}: core style files (edit to change appearance)
n@1118 51 \item \texttt{CITING.txt, LICENSE.txt, README.txt}: text files with, respectively, the citation which we ask to include in any work where this tool or any portion thereof is used, modified or otherwise; the license under which the software is shared; and a general readme file referring to these instructions.
n@1118 52 \item \texttt{core.js}: JavaScript file with core functionality
n@1118 53 \item \texttt{index.html}: webpage where interface should appear (includes link to test configuration XML)
n@1118 54 \item \texttt{jquery-2.1.4.js}: jQuery JavaScript Library
n@1118 55 \item \texttt{loudness.js}: Allows for automatic calculation of loudness of Web Audio API Buffer objects, return gain values to correct for a target loudness or match loudness between multiple objects
n@1118 56 \item \texttt{pythonServer.py}: webserver for running tests locally
n@1118 57 \item \texttt{pythonServer-legacy.py}: webserver with limited functionality (no automatic storing of output XML files)
n@1118 58 \item \texttt{save.php}: PHP script to store result XML files to web server\\
n@1118 59 \end{itemize}
n@1118 60 \textbf{Documentation (./docs/)}
n@1118 61 \begin{itemize}
n@1118 62 \item \href{http://c4dm.eecs.qmul.ac.uk/dmrn/events/dmrnp10/#posters}{DMRN+10}: PDF and \LaTeX source of poster for 10\textsuperscript{th} Digital Music Research Network One-Day workshop (``soft launch'')
n@1118 63 \item Instructions: PDF and \LaTeX source of these instructions
n@1118 64 \item Project Specification Document (\LaTeX/PDF)
n@1118 65 \item Results Specification Document (\LaTeX/PDF)
n@1118 66 \item SMC15: PDF and \LaTeX source of 12th Sound and Music Computing Conference paper \cite{waet}
n@1118 67 \item WAC2016: PDF and \LaTeX source of 2nd Web Audio Conference paper\\
n@1118 68 \end{itemize}
n@1118 69 \textbf{Example project (./example\_eval/)}
n@1118 70 \begin{itemize}
n@1118 71 \item An example of what the set up XML should look like, with example audio files 0.wav-10.wav which are short recordings at 44.1kHz, 16bit of a woman saying the corresponding number (useful for testing randomisation and general familiarisation with the interface).\\
n@1118 72 \end{itemize}
n@1163 73 \textbf{Interface files (./interfaces/)}
n@1163 74 \begin{itemize}
n@1163 75 \item Each interface class has a JavaScript file and an optional CSS style file. These are loaded as needed.
n@1163 76 \end{itemize}
n@1163 77
n@1118 78 \textbf{Output files (./saves/)}
n@1118 79 \begin{itemize}
n@1118 80 \item The output XML files of tests will be stored here by default by the \texttt{pythonServer.py} script.\\
n@1118 81 \end{itemize}
n@1118 82 \textbf{Auxiliary scripts (./scripts/)}
n@1118 83 \begin{itemize}
n@1118 84 \item Helpful Python scripts for extraction and visualisation of data.\\
n@1118 85 \end{itemize}
n@1118 86 \textbf{Test creation tool (./test\_create/)}
n@1118 87 \begin{itemize}
n@1118 88 \item Webpage for easily setting up your own test without having to delve into the XML.\\
n@1118 89 \end{itemize}
n@1118 90
n@1118 91 \subsection{Compatibility}
n@1118 92 As Microsoft Internet Explorer doesn't support the Web Audio API\footnote{\url{http://caniuse.com/\#feat=audio-api}}, you will need another browser like Google Chrome, Safari or Firefox (all three are tested and confirmed to work).
n@1118 93
n@1118 94 Firefox does not currently support other bit depths than 8 or 16 bit for PCM wave files. In the future, this will throw a warning message to tell the user that their content is being quantised automatically. %Nick? Right? To be removed if and when actually implemented
n@1118 95
n@1118 96 The tool is platform-independent and works in any browser that supports the Web Audio API. It does not require any specific, proprietary software. However, in case the tool is hosted locally (i.e. you are not hosting it on an actual webserver) you will need Python (2.7), which is a free programming language - see the next paragraph.
n@1118 97
n@1118 98 \clearpage
n@1118 99
n@1118 100
n@1118 101 \section{Test setup}
n@1118 102
n@1118 103 \subsection{Sample rate}
n@1118 104 Depending on how the experiment is set up, audio is resampled automatically (the Web Audio default) or the sample rate is enforced. In the latter case, you will need to make sure that the sample rate of the system is equal to the sample rate of these audio files. For this reason, all audio files in the experiment will have to have the same sample rate.
n@1118 105
n@1118 106 Always make sure that all other digital equipment in the playback chain (clock, audio interface, digital-to-analog converter, ...) is set to this same sample rate.
n@1118 107
n@1118 108 Note that upon changing the sampling rate, the browser will have to be restarted for the change to take effect.
n@1118 109
n@1118 110 \subsubsection{Mac OS X}
n@1118 111 To change the sample rate in Mac OS X, go to \textbf{Applications/Utilities/Audio MIDI Setup} or find this application with Spotlight (see Figure \ref{fig:audiomidisetup}). Then select the output of the audio interface you are using and change the `Format' to the appropriate number. Also make sure the bit depth and channel count are as desired.
n@1118 112 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
n@1118 113
n@1118 114 Also make sure left and right channel gains are equal, as some applications alter this without changing it back, leading to a predominantly louder left or right channel. See Figure \ref{fig:audiomidisetup} for an example where the channel gains are different.
n@1118 115
n@1118 116 \begin{figure}[tb]
n@1118 117 \centering
n@1118 118 \includegraphics[width=.65\textwidth]{img/audiomidisetup.png}
n@1118 119 \caption{The Audio MIDI Setup window in Mac OS X}
n@1118 120 \label{fig:audiomidisetup}
n@1118 121 \end{figure}
n@1118 122
n@1118 123 \subsubsection{Windows}
n@1118 124 To change the sample rate in Windows, right-click on the speaker icon in the lower-right corner of your desktop and choose `Playback devices'. Right-click the appropriate playback device and click `Properties'. Click the `Advanced' tab and verify or change the sample rate under `Default Format'. % NEEDS CONFIRMATION
n@1118 125 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
n@1118 126
n@1118 127 \subsection{Local test}
n@1118 128 If the test is hosted locally, you will need to run the local webserver provided with this tool.
n@1118 129
n@1118 130 \subsubsection{Mac OS X \& Linux}
n@1118 131
n@1118 132 On Mac OS X, Python comes preinstalled, as with most Unix/Linux distributions.
n@1118 133
n@1118 134 Open the Terminal (find it in \textbf{Applications/Terminal} or via Spotlight), and go to the folder you downloaded. To do this, type \texttt{cd [folder]}, where \texttt{[folder]} is the folder where to find the \texttt{pythonServer.py} script you downloaded. For instance, if the location is \texttt{/Users/John/Documents/test/}, then type
n@1118 135
n@1118 136 \texttt{cd /Users/John/Documents/test/}
n@1118 137
n@1118 138 Then hit enter and run the Python script by typing
n@1118 139
n@1118 140 \texttt{python pythonServer.py}
n@1118 141
n@1118 142 and hit enter again. See also Figure \ref{fig:terminal}.
n@1118 143
n@1118 144 \begin{figure}[htbp]
n@1118 145 \begin{center}
n@1118 146 \includegraphics[width=.75\textwidth]{pythonServer.png}
n@1118 147 \caption{Mac OS X: The Terminal window after going to the right folder (\texttt{cd [folder\_path]}) and running \texttt{pythonServer.py}.}
n@1118 148 \label{fig:terminal}
n@1118 149 \end{center}
n@1118 150 \end{figure}
n@1118 151
n@1118 152 Alternatively, you can simply type \texttt{python} (followed by a space) and drag the file into the Terminal window from Finder. % DOESN'T WORK YET
n@1118 153
n@1118 154 You can leave this running throughout the different experiments (i.e. leave the Terminal open). Once running, the terminal will report the current URL to type into your browser to initiate the test; usually this is \texttt{http://localhost:8000/}.
n@1118 155
n@1118 156 To start the test, open the browser and type
n@1118 157
n@1118 158 \texttt{localhost:8000}
n@1118 159
n@1118 160 and hit enter. The test should start (see Figure \ref{fig:test}).
n@1118 161
n@1118 162 To quit the server, either close the terminal window or press Ctrl+C on your keyboard to forcibly shut the server.
n@1118 163
n@1118 164 \subsubsection{Windows}
n@1118 165
n@1118 166 On Windows, Python 2.7 is not generally preinstalled and therefore has to be downloaded\footnote{\url{https://www.python.org/downloads/windows/}} and installed to be able to run scripts such as the local webserver, necessary if the tool is hosted locally.
n@1118 167
n@1118 168 Simply double click the Python script \texttt{pythonServer.py} in the folder you downloaded.
n@1118 169
n@1118 170 You may see a warning like the one in Figure \ref{fig:warning}. Click `Allow access'.
n@1118 171
n@1118 172 \begin{figure}[htbp]
n@1118 173 \begin{center}
n@1118 174 \includegraphics[width=.6\textwidth]{warning.png}
n@1118 175 \caption{Windows: Potential warning message when executing \texttt{pythonServer.py}.}
n@1118 176 \label{fig:warning}
n@1118 177 \end{center}
n@1118 178 \end{figure}
n@1118 179
n@1118 180 The process should now start, in the Command prompt that opens - see Figure \ref{fig:python}.
n@1118 181
n@1118 182 \begin{figure}[htbp]
n@1118 183 \begin{center}
n@1118 184 \includegraphics[width=.75\textwidth]{python.png}
n@1118 185 \caption{Windows: The Command Prompt after running \texttt{pythonServer.py} and opening the corresponding website.}
n@1118 186 \label{fig:python}
n@1118 187 \end{center}
n@1118 188 \end{figure}
n@1118 189
n@1118 190 You can leave this running throughout the different experiments (i.e. leave the Command Prompt open).
n@1118 191
n@1118 192 To start the test, open the browser and type
n@1118 193
n@1118 194 \texttt{localhost:8000}
n@1118 195
n@1118 196 and hit enter. The test should start (see Figure \ref{fig:test}).
n@1118 197
n@1118 198 \begin{figure}[htb]
n@1118 199 \begin{center}
n@1118 200 \includegraphics[width=.8\textwidth]{test.png}
n@1118 201 \caption{The start of the test in Google Chrome on Windows 7.}
n@1118 202 \label{fig:test}
n@1118 203 \end{center}
n@1118 204 \end{figure}
n@1118 205
n@1118 206 If at any point in the test the participant reports weird behaviour or an error of some kind, or the test needs to be interrupted, please notify the experimenter and/or refer to Section \ref{sec:troubleshooting}.
n@1118 207
n@1118 208 When the test is over (the subject should see a message to that effect, and click `Submit' one last time), the output XML file containing all collected data should have appeared in `saves/'. The names of these files are `test-0.xml', `test-1.xml', etc., in ascending order. The Terminal or Command prompt running the local web server will display the following file name. If such a file did not appear, please again refer to Section \ref{sec:troubleshooting}.
n@1118 209
n@1118 210 It is advised that you back up these results as often as possible, as a loss of this data means that the time and effort spent by the subject(s) has been in vain. Save the results to an external or network drive, and/or send them to the experimenter regularly.
n@1118 211
n@1118 212 To start the test again for a new participant, you do not need to close the browser or shut down the Terminal or Command Prompt. Simply refresh the page or go to \texttt{localhost:8000} again.
n@1118 213
n@1118 214
n@1118 215 \subsection{Remote test}
n@1118 216 Put all files on a web server which supports PHP. This allows the `save.php' script to store the XML result files in the `saves/' folder. If the web server is not able to store the XML file there at the end of the test, it will present the XML file locally to the user, as a `Save file' link.
n@1118 217
n@1118 218 Make sure the \texttt{projectReturn} attribute of the \texttt{setup} node is set to the \texttt{save.php} script.
n@1118 219
n@1118 220 Then, just go to the URL of the corresponding HTML file, e.g. \texttt{http://server.com/path/to/WAET/index.html?url=test/my-test.xml}. If storing on the server doesn't work at submission (e.g. if the \texttt{projectReturn} attribute isn't properly set), the result XML file will be presented to the subject on the client side, as a `Save file' link.
n@1118 221
n@1163 222 \subsection{Load a test / Multiple test documents}
n@1118 223 By default the index page will load a demo page of tests. To automatically load a test document, you need to append the location in the URL. If your URL is normally http://localhost:8000/index.html you would append the following: \texttt{?url=/path/to/your/test.xml}. Replace the fields with your actual path, the path is local to the running directory, so if you have your test in the directory \texttt{example\_eval} called \texttt{project.xml} you would append \texttt{?url=/example\_eval/project.xml}.
n@1118 224
n@1118 225 \clearpage
n@1118 226
n@1118 227 \section{Interfaces}
n@1118 228
n@1118 229 The Web Audio Evaluation Tool comes with a number of interface styles, each of which can be customised extensively, either by configuring them differently using the many optional features, or by modifying the JavaScript files.
n@1118 230
n@1163 231 To set the interface style for the whole test, set the attribute of the \texttt{setup} node to \texttt{interface="APE"}, where \texttt{"APE"} is one of the interface names below.
n@1118 232
n@1118 233 \subsection{APE}
n@1118 234 The APE interface is based on \cite{ape}, and consists of one or more axes, each corresponding with an attribute to be rated, on which markers are placed. As such, it is a multiple stimulus interface where (for each dimension or attribute) all elements are on one axis so that they can be maximally compared against each other, as opposed to rated individually or with regards to a single reference.
n@1118 235 It also contains an optional text box for each element, to allow for clarification by the subject, tagging, and so on.
n@1118 236
n@1118 237 \subsection{MUSHRA}
n@1163 238 This is a straightforward implementation of \cite{mushra}, especially common for the rating of audio quality, for instance for the evaluation of audio codecs. This can also operate any vertical slider style test and does not necessarily have to match the MUSHRA specification.
n@1163 239
n@1163 240 \subsection{AB}
n@1163 241 Performs a pairwise comparison, but supports ABX and n-way comparison (in the example we demonstrate it performing a 7-way comparison).
n@1163 242
n@1163 243 \subsection{discrete/Likert}
n@1163 244 Each audio element is given a discrete set of values based on the number of slider options specified. For instance, Likert specifies 5 values and therefore each audio element must be one of those 5 values.
n@1163 245
n@1163 246 \subsection{ACR/CCR/DCR/horizontal}
n@1163 247 Creates the same interfaces as MUSHRA except the sliders are horizontal, not vertical.
n@1118 248
n@1118 249
n@1118 250 \clearpage
n@1118 251
n@1163 252 \section{Project XML}
n@1163 253
n@1163 254 Each test is defined by its project XML file, examples of these can be seen in the ./example\_eval/ directory.
n@1163 255
n@1163 256 In the XML there are several nodes which must be defined:
n@1163 257 \begin{itemize}
n@1163 258 \item \texttt{<waet>}: The root node.
n@1163 259 \item \texttt{<setup>}: The first child node, defines whole-test parameters
n@1163 260 \item \texttt{<page>}: Specifies a test page, attached \emph{after} the \texttt{<setup>}.
n@1163 261 \item \texttt{<audioelement>}: Specifies an audio element.
n@1163 262 \end{itemize}
n@1163 263
n@1163 264 The test uses XML validation, so the ordering of nodes is important to pass this validation. Some nodes also have specific attributes which must be set and may even have a certain format to apply them. This is done so error checking can be performed both quickly and succinctly with easy to find errors before loading and running a test session.
n@1163 265
n@1163 266 Before identifying any features, this part will walk you through the available nodes, their function and their attributes.
n@1163 267
n@1163 268 \subsection{Root}
n@1163 269 The root node is \texttt{<waet>}, it must have the following attributes:
n@1163 270
n@1163 271 \texttt{xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"}
n@1163 272
n@1163 273 \texttt{xsi:noNamespaceSchemaLocation="test-schema.xsd"}.
n@1163 274
n@1163 275 This will ensure it is checked against the XML schema for validation.
n@1163 276
n@1163 277 \subsection{Set up}
n@1163 278 The first child node, \texttt{<setup>} specifies any one time and global parameters. It takes the following attributes:
n@1163 279 \begin{itemize}
n@1163 280 \item \texttt{interface}: String, mandatory, specifies the interface to load
n@1163 281 \item \texttt{projectReturn}: URL, mandatory, specifies the return point. Can be a 3rd party server or the local server. Set to null to disable automatic saving. Specifying "save.php" will trigger the return if either the PHP or python servers are used. On error, it will always default to presenting the save on page.
n@1163 282 \item \texttt{randomiseOrder}: Boolean, optional, if true it will randomise the order of the test pages. Default is false.
n@1163 283 \item \texttt{testPages}: non-negative integer, optional. Specifies the number of test pages to actually test with. Combined with randomiseOrder being true will give a random set of test pages per participant from the given pool of \texttt{<page>} nodes. Specifying 0 disables this option, default is 0.
n@1163 284 \item \texttt{loudness}: non-positive integer, optional. Set the default LUFS target value. See \ref{sec:loudness} for more.
n@1163 285 \item \texttt{sampleRate}: positive integer, optional. If set, the sample rate reported by the Web Audio API must match this number. See \ref{sec:samplerate}.
n@1163 286 \end{itemize}
n@1163 287
n@1163 288 The \texttt{<setup>} node takes the following child nodes, note these must appear in this order:
n@1163 289 \begin{itemize}
n@1163 290 \item \texttt{<survey>}: Min of 0, max of 2 occurrences. See \ref{sec:survey}
n@1163 291 \item \texttt{<metric>}: Must appear only once.
n@1163 292 \item \texttt{<interface>}: Must appear only once.
n@1163 293 \end{itemize}
n@1163 294
n@1163 295 \subsection{Page}
n@1163 296 The only other first level child nodes, these specify the test pages. It takes the following attributes:
n@1163 297 \begin{itemize}
n@1163 298 \item \texttt{id}: ID, mandatory. A string which must be unique across the entire XML. It is used to identify the page on test completion as pages are returned in the results in the order they appeared, not specified.
n@1163 299 \item \texttt{hostURL}: URL, mandatory. Used in conjunction with the \texttt{<audioelement>} url to specify where the audio files are located. For instance if all your files are in the directory \texttt{./test/} you can set this attribute to "/test/" and the \texttt{<audioelement>} url attribute only needs the file name. Set to "" if no hostURL prefix desired.
n@1163 300 \item \texttt{randomiseOrder}: Boolean, optional. If true the audio fragments are presented randomly rather than the order specified. See \ref{sec:randomisation}. Default is false.
n@1163 301 \item \texttt{repeatCount}: non-negative integer, optional. Specify the number of times to repeat the test page (re-present). Each presentation will appear as an individual page in the results. Default is 0.
n@1163 302 \item \texttt{loop}: Boolean, optional. If true, the audio elements will loop synchronously with each other. See \ref{sec:looping}. Default is false.
n@1163 303 \item \texttt{showElementComments}: Boolean, optional. If true then there will be a comment box on the test page for each audio element presented, see \ref{sec:commentboxes}.
n@1163 304 \item \texttt{loudness}: non-positive integer, optional. Set the LUFS target value for this page. Supersedes the \texttt{<setup>} loudness attribute for this page. See \ref{sec:loudness} for more.
n@1163 305 \end{itemize}
n@1163 306
n@1163 307 The \texttt{<page>} node takes the following child nodes, note these must appear in this order:
n@1163 308 \begin{itemize}
n@1163 309 \item \texttt{<title>}: Appear once or not at all. The text content of this node specifies the title of the test page, for instance \texttt{<title>John Doe's Test</title>}
n@1163 310 \item \texttt{<commentboxprefix>}: Appear once or not at all. The text content specifies the prefix of the comment boxes, see \ref{sec:commentboxes}.
n@1163 311 \item \texttt{<interface>}: Must appear only once.
n@1163 312 \item \texttt{<audioelement>}: Minimum of one. Specifies an audio element, see \ref{sec:audioelement}.
n@1163 313 \item \texttt{<commentquestion>}: Min of 0, max unlimited occurrences. See \ref{sec:commentboxes}.
n@1163 314 \item \texttt{<survey>}: Min of 0, max of 2 occurrences. See \ref{sec:survey}
n@1163 315 \end{itemize}
n@1163 316
n@1163 317 \subsection{Survey}
n@1163 318 \label{sec:survey}
n@1163 319 These specify any survey items to be presented. There must be a maximum of two of these per \texttt{<setup>} and \texttt{<page>} nodes. These have one attribute, location, which must be set to one of the following: before, pre, after or post. In this case before == pre and after == post. This specifies whether the survey appears before or after the node it is associated with. When a child of \texttt{<setup>} then pre/before will be shown before the first test page and after/post shown after completing the last test page. When a child of \texttt{<page>} then pre/before is before the test commences and after/post is once the test has been submitted.
n@1163 320
n@1163 321 The survey node takes as its only children the \texttt{<surveyentry>} node, of which there can be any number.
n@1163 322
n@1163 323 \subsubsection{Survey Entry}
n@1163 324 These nodes have the following attributes, which vary depending on the survey type wanted:
n@1163 325 \begin{itemize}
n@1163 326 \item \texttt{id}: ID, mandatory. Must be unique across the entire XML, used to identify the response in the results.
n@1163 327 \item \texttt{type}: String, mandatory. Must be one of the following: statement, question, checkbox, radio or number. This defines the type to show.
n@1163 328 \item \texttt{mandatory}: Boolean, optional. Defines if the survey must have a response or not. Does not apply to statements. Default is false.
n@1163 329 \item \texttt{min}: Number, optional. Only applies when \texttt{type="number"}, the minimum valid response.
n@1163 330 \item \texttt{max}: Number, optional. Only applies when \texttt{type="number"}, the maximum valid response.
n@1163 331 \item \texttt{boxsize}: String, optional. Only applies when \texttt{type="question"} and must be one of the following: normal (default), small, large or huge.
n@1163 332 \end{itemize}
n@1163 333
n@1163 334 The nodes have the following children, which vary depending on the survey type wanted.
n@1163 335 \begin{itemize}
n@1163 336 \item \texttt{<statement>}: Must appear only once. Its text content specifies the text to appear as the statement or question for the user to respond to.
n@1163 337 \item \texttt{<option>}: Only valid if the parent node has the attribute \texttt{type} set to checkbox or radio. Has attribute \texttt{name} to identify the selected option in the results. The text content is the text to show next to the radio/checkbox.
n@1163 338 \end{itemize}
n@1163 339
n@1163 340 \subsection{Interface}
n@1163 341 This node specifies any interface specific options and test parameters. It has an optional \texttt{name} attribute used to set the axis name (where applicable), such as the multi-axis APE interface. Specifying multiple interface nodes in a \texttt{<page>} node will trigger multiple axes where applicable, otherwise only the \emph{first node} will be used and the rest ignored.
n@1163 342
n@1163 343 The node has the following children, note the order these must appear in is as follows:
n@1163 344 \begin{itemize}
n@1163 345 \item \texttt{title}: Min 0, max 1 occurrence. The text content specifies the name of the axis as shown to the user.
n@1163 346 \item \texttt{interfaceoption}: Min 0, max unbounded. Specifies the interface options. See \ref{sec:interfaceoption}.
n@1163 347 \item \texttt{scales}: Min 0, max 1 occurrence. Contains \texttt{<scalelabel>} nodes which define the displayed scales. See \ref{sec:scales}.
n@1163 348 \end{itemize}
n@1163 349
n@1163 350 \subsection{Audio Element}
n@1163 351 \label{sec:audioelement}
n@1163 352 Appear as children of the \texttt{page} node. Each of these specify an individual interface fragment to display. Multiple fragments can reference the same file (allowing for repetition with different parameters or blind-doubles). The node has the following attributes:
n@1163 353 \begin{itemize}
n@1163 354 \item \texttt{id}: ID, mandatory. Must be unique across the test page. Used to identify the specific fragment in the results.
n@1163 355 \item \texttt{url}: URL, mandatory. Used with the parent \texttt{page} nodes' \texttt{hostURL} attribute to get the full url of the audio file to load.
n@1163 356 \item \texttt{gain}: Float, optional. Specify the gain in decibels to apply to the node after loudness normalisation. Default is 0.
n@1163 357 \item \texttt{type}: String, optional. Must be one of the following: normal (default when not specified), anchor, reference or outside-reference. Normal, anchor and reference are presented as normal, outside-reference presents the node as a separate interface option.
n@1163 358 \item \texttt{marker}: Integer between 0 and 100, optional. Only used when \texttt{type="anchor"|"reference"}. See \ref{sec:referencesandanchors}.
n@1163 359 \end{itemize}
n@1163 360
n@1163 361
n@1118 362 \section{Features}
n@1118 363
n@1118 364 This section covers the different features implemented in the Web Audio Evaluation Tool, how to use them, and what to know about them.
n@1118 365
n@1118 366 Unless otherwise specified, \emph{each} feature described here is optional, i.e. it can be enabled or disabled and adjusted to some extent.
n@1118 367
n@1163 368 As the example project showcases (nearly) all of these features, please refer to its configuration XML document for a demonstration of how to enable and adjust them.
n@1118 369
n@1163 370 \subsection{Interface options}
n@1163 371 The interface node has children of interface options which are used to specify modifications to the test environment. These are divided into two categories: check and show. Check are used to specify conditions which must be met before a page can be completed, these include checking all fragments have been played or checking all fragments have a comment and so on. Show is used to show an optional on page element or control, such as the playhead or master volume.
n@1163 372
n@1163 373 Check items have the attribute "type" set to "check". The following list gives the string to give the "name" attribute along with a description of the check.
n@1163 374 \begin{itemize}
n@1163 375 \item \texttt{fragmentPlayed}: Checks that all fragments have been at least partially played
n@1163 376 \item \texttt{fragmentFullPlayback}: Checks that all fragments have been fully played. \emph{NOTE:} This will always clear if the page is looping as it is not possible to know every sample has been played.
n@1163 377 \item \texttt{fragmentMoved}: Checks that all fragments have been moved. This is interface dependent, for instance on AB this will always clear as there is no movement.
n@1163 378 \item \texttt{fragmentComments}: Checks that all fragments have a comment. Will clear if there are no on page comments but with a console warning.
n@1163 379 \item \texttt{scalerange}: Has two extra attributes "min" and "max". Checks that at least one element is below the min value and one element is above the max value.
n@1163 380 \end{itemize}
n@1118 381
n@1163 382 Show items have the attribute "type" set to "show". The following list gives the string to give the "name" attribute along with a description.
n@1163 383 \begin{itemize}
n@1163 384 \item \texttt{playhead}: Shows the playhead to the end user indicating where in the file they are currently listening
n@1163 385 \item \texttt{page-count}: Shows the current test page number and the total number of test pages.
n@1163 386 \item \texttt{volume}: Shows a master volume control to the user to manipulate the output gain of the page. This is tracked.
n@1163 387 \end{itemize}
n@1118 388
n@1118 389 \subsubsection{Multiple scales}
n@1118 390 In the case of multiple rating scales, e.g. when the stimuli are to be rated in terms of attributes `timbre' and `spatial impression', multiple interface nodes will have to be added, each specifying the title and annotations.
n@1118 391
n@1118 392 This is where the \texttt{interface}'s \texttt{name} attribute is particularly important: use this to retrieve the rating values, comments and metrics associated with the specified interface.
n@1118 393 If none is given, you can still use the automatically given \texttt{interface-id}, which is the interface number starting with 0 and corresponding to the order in which the rating scales appear.
n@1118 394
n@1118 395 \subsection{Randomisation}
n@1163 396 \label{sec:randomisation}
n@1118 397 [WORK IN PROGRESS]
n@1118 398
n@1118 399 \subsubsection{Randomisation of configuration XML files}
n@1163 400 The python server has a special function to automatically cycle through a list of test pages. Instead of directly requesting a specific XML, simply request \texttt{pseudo.xml} in the browser URL to cycle through a list of XMLs. These XMLs must be in the local directory called \texttt{./pseudo/}.
n@1118 401 % how to
n@1118 402 % explain how this is implemented in the pythonServer
n@1118 403 %Nick? already implemented in the PHP?
n@1118 404 % Needs to be implemented in PHP and automated better, will complete soon
n@1118 405
n@1118 406
n@1118 407 \subsubsection{Randomisation of page order}
n@1118 408 The page order randomisation is set by the \texttt{<setup>} node attribute \texttt{randomise-order}, for example \texttt{<setup ... randomise-order="true">...</setup>} will randomise the test page order. When not set, the default is to \textbf{not} randomise the test page order.
n@1118 409
n@1118 410 \subsubsection{Randomisation of axis order}
n@1118 411
n@1118 412 \subsubsection{Randomisation of fragment order}
n@1118 413 The audio fragment randomisation is set by the \texttt{<audioholder>} node attribute \texttt{randomise-order}, for example \texttt{<audioholder ... randomise-order="true">...</audioholder>} will randomise the fragment order. When not set, the default is to \textbf{not} randomise the fragment order.
n@1118 414
n@1118 415 \subsubsection{Randomisation of initial slider position}
n@1118 416 By default slider values are randomised on start. The MUSHRA interface supports setting the initial values of all sliders through the \texttt{<audioholder>} attribute \texttt{initial-position}. This takes an integer between 0 and 100 to signify the slider position.
n@1118 417 % /subsubsection{Randomisation of survey question order}
n@1118 418 % should be an attribute of the individual 'pretest' and 'posttest' elements
n@1118 419 % uncomment once we have it
n@1118 420
n@1118 421 \subsection{Looping}
n@1163 422 \label{sec:looping}
n@1163 423 Looping enables the fragments to loop until stopped by the user. Looping is synchronous so all fragments start at the same time on each loop.
n@1163 424 Individual test pages can have their playback looped by the \texttt{<page>} attribute \texttt{loop} with a value of "true" or "false".
n@1118 425 If the fragments are not of equal length initially, they are padded with zeros so that they are equal length, to enable looping without the fragments going out of sync relative to each other.
n@1118 426
n@1163 427 Note that fragments cannot be played until all page fragments are loaded when in looped mode, as the engine needs to know the length of each fragment to calculate the padding.
n@1118 428
n@1118 429 \subsection{Sample rate}
n@1163 430 \label{sec:samplerate}
n@1163 431 If you require the test to be conducted at a certain sample rate (i.e. you do not tolerate resampling of the elements to correspond with the system's sample rate), add \texttt{sampleRate="96000"} - where ``96000'' can be any supported sample rate (in Hz) - so that a warning message is shown alerting the subject that their system's sample rate is different from this enforced sample rate. This is checked immediately after parsing and stops the page loading any other elements if this check has failed.
n@1118 432
n@1118 433 \subsection{Metrics}
n@1118 434 Enable the collection of metrics by adding \texttt{collectMetrics=`true'} in the \texttt{setup} node. % Should this always be on??
n@1118 435
n@1118 436 The \texttt{Metric} node, which contains the metrics to be tracked during the complete test, is a child of the \texttt{setup} node, and it could look as follows.
n@1118 437
n@1118 438 \begin{lstlisting}
n@1118 439 <Metric>
n@1118 440 <metricEnable>testTimer</metricEnable>
n@1118 441 <metricEnable>elementTimer</metricEnable>
n@1118 442 <metricEnable>elementInitialPosition</metricEnable>
n@1118 443 <metricEnable>elementTracker</metricEnable>
n@1118 444 <metricEnable>elementFlagListenedTo</metricEnable>
n@1118 445 <metricEnable>elementFlagMoved</metricEnable>
n@1118 446 <metricEnable>elementListenTracker</metricEnable>
n@1118 447 </Metric>
n@1118 448 \end{lstlisting}
n@1118 449
n@1118 450 When in doubt, err on the inclusive side, as one never knows which information is needed in the future. Most of these metrics are necessary for post-processing scripts such as timeline\_view\_movement.py.
n@1118 451
n@1118 452 \subsubsection{Time test duration}
n@1118 453 \texttt{testTimer}\\
n@1118 454 One per test page. Presents the total test time from the first playback on the test page to the submission of the test page (excluding test time of the pre-/post-test surveys). This is presented in the results as \texttt{<metricresult id="testTime"> 8.60299319727892 </metricresult>}. The time is in seconds.
n@1118 455
n@1118 456 \subsubsection{Time fragment playback}
n@1118 457 \texttt{elementTimer}\\
n@1118 458 One per audio fragment per test page. This totals up the entire time the audio fragment has been listened to in this test and is presented as \texttt{<metricresult name="enableElementTimer"> 1.0042630385487428 </metricresult>}. The time is in seconds.
n@1118 459
n@1118 460 \subsubsection{Initial positions}
n@1118 461 \texttt{elementInitialPosition}\\
n@1118 462 One per audio fragment per test page. Tracks the initial position of the sliders, especially relevant when these are randomised. Example result \texttt{<metricresult name="elementInitialPosition"> 0.8395522388059702 </metricresult>}.
n@1118 463
n@1118 464 \subsubsection{Track movements}
n@1118 465 \texttt{elementTracker}\\
n@1118 466 One per audio fragment per test page. Tracks the movement of each interface object. Each movement event has the time it occurred at and the new value.
n@1118 467 \subsubsection{Which fragments listened to}
n@1118 468 \texttt{elementFlagListenedTo}\\
n@1118 469 One per audio fragment per test page. Boolean response, set to true if listened to.
n@1118 470 \subsubsection{Which fragments moved}
n@1118 471 \texttt{elementFlagMoved}\\
n@1118 472 One per audio fragment per test page. Binary check whether or not the marker corresponding with a particular fragment was moved at all throughout the experiment.
n@1118 473
n@1118 474 \subsubsection{elementListenTracker}
n@1118 475 \texttt{elementListenTracker}\\
n@1118 476 One per audio fragment per test page. Tracks the playback events of each audio element, pairing both the time in the test when playback started and when it stopped; it also gives the buffer time positions.
n@1118 477
n@1118 478 \subsection{References and anchors}
n@1163 479 \label{sec:referencesandanchors}
n@1118 480 The audio elements, \texttt{<audioelement>} have the attribute \texttt{type}, which defaults to normal. Setting this to one of the following will have the following effects.
n@1118 481 \subsubsection{Outside Reference}
n@1118 482 Set type to 'outside-reference'. This will place the object in a separate playback element clearly labelled as an outside reference. This is exempt from any movement checks but will still be included in any listening checks.
n@1118 483 \subsubsection{Hidden reference}
n@1118 484 Set type to 'reference'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the 'reference' type in the results to quickly find it. The reference can be forced to be below a value before completing the test page by setting the attribute 'marker' to be a value between 0 and 100 representing the integer value position it must be equal to or above.
n@1118 485 \subsubsection{Hidden anchor}
n@1118 486 Set type to 'anchor'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the 'anchor' type in the results to quickly find it. The anchor can be forced to be below a value before completing the test page by setting the attribute 'marker' to be a value between 0 and 100 representing the integer value position it must be equal to or below.
n@1118 487
n@1118 488 \subsection{Checks}
n@1118 489 \label{sec:checks}
n@1118 490
n@1118 491 %blabla
n@1118 492 These checks are enabled in the \texttt{interface} node, which is a child of the \texttt{setup} node.
n@1118 493 \subsubsection{Playback checks}
n@1118 494 % what it does/is
n@1118 495 Enforce playing each sample at least once, for at least a little bit (e.g. this test is satisfied even if you only play a tiny portion of the file), by alerting the user to which samples have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before clicking each sample at least once.
n@1118 496 % how to enable/disable
n@1118 497
n@1118 498 Alternatively, one can check whether the \emph{entire} fragment was listened to at least once.
n@1118 499 % how to enable
n@1118 500
n@1118 501 Add \texttt{<check name="fragmentPlayed"/>} to the \texttt{interface} node.
n@1118 502
n@1118 503
n@1118 504 \subsubsection{Movement check}
n@1118 505 Enforce moving each sample at least once, by at least a little bit, by alerting the user to which samples have not been moved upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before moving each sample at least once.
n@1118 506 If there are several axes, the warning will specify which samples have to be moved on which axis.
n@1118 507
n@1118 508 Add \texttt{<check name="fragmentMoved"/>} to the \texttt{interface} node.
n@1118 509
n@1118 510 \subsubsection{Comment check}
n@1118 511 % How to enable/disable?
n@1118 512
n@1118 513 Enforce commenting, by alerting the user to which samples have not been commented on upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before putting at least one character in each comment box.
n@1118 514
n@1118 515 Note that this does not apply to any extra (text, radio button, checkbox) elements, unless these have the `mandatory' option enabled. %Nick? is this extra 'mandatory' option implemented?
n@1118 516
n@1118 517 Add \texttt{<check name="fragmentComments"/>} to the \texttt{interface} node.
n@1118 518
n@1118 519 %ADD: how to add a custom comment box
n@1118 520
n@1118 521 \subsubsection{Scale use check}
n@1118 522 It is possible to enforce a certain usage of the scale, meaning that at least one slider needs to be below and/or above a certain percentage of the slider.
n@1118 523
n@1118 524 Add \texttt{<check name="scalerange" min="25" max="75"/>} to the \texttt{interface} node.
n@1118 525
n@1118 526 \subsubsection{Note on the use of multiple rating axes}
n@1118 527 I.e. what if more than one axis? How to specify which axis the checks relate to? %Nick? to add?
n@1118 528
n@1118 529 \subsection{Platform information}
n@1118 530 % what does it do, what does it look like
n@1118 531 % limitations?
n@1118 532 For troubleshooting and usage statistics purposes, information about the browser and the operating system is logged in the results XML file. This is especially useful in the case of remote tests, when it is not certain which operating system, browser and/or browser version were used. Note that this information is not always available and/or accurate, e.g. when the subject has taken steps to be more anonymous, so it should be treated as a guide only.
n@1118 533
n@1118 534 Example:
n@1118 535 \begin{lstlisting}
n@1118 536 <navigator>
n@1118 537 <platform>MacIntel</platform>
n@1118 538 <vendor>Google Inc.</vendor>
n@1163 539 <uagent>Mozilla/5.0 ... </uagent>
n@1118 540 </navigator>
n@1118 541 \end{lstlisting}
n@1118 542
n@1118 543 \subsection{Gain}
n@1118 544 It is possible to set the gain (in decibel) applied to the different audioelements, as an attribute of the \texttt{audioelement} nodes in the configuration XML file:
n@1118 545
n@1118 546 \texttt{<audioElements url="sample-01.wav" gain="-6" id="sample01quieter" />}\\
n@1118 547 Please note, there are no checks on this to detect whether the value was accidentally entered as a linear gain factor.
n@1118 548
n@1118 549 \subsection{Loudness}
n@1163 550 \label{sec:loudness}
n@1118 551 % automatic loudness equalisation
n@1118 552 % guide to loudness.js
n@1163 553 Each audio fragment on loading has its loudness calculated. The tool uses the EBU R 128 recommendation following the ITU-R BS.1770-4 loudness calculations to return the integrated LUFS loudness. The attribute \texttt{loudness} will set the loudness from the scope it is applied in. Applying it in the \texttt{<setup>} node will set the loudness for all test pages. Applying it in the \texttt{<page>} node will set the loudness for that page. Applying it in the \texttt{<audioelement>} node will set the loudness for that fragment. The scope is set locally, so if there is a loudness on both the \texttt{<page>} and \texttt{<setup>} nodes, that test page will take the value associated with the \texttt{<page>}. The loudness attribute is set in LUFS.
n@1118 554
n@1118 555 \clearpage
n@1118 556
n@1118 557
n@1118 558 \section{Using the test create tool}
n@1118 559 We provide a test creation tool, available in the directory test\_create. This tool is a self-contained web page, so double-clicking will launch the page in your system default browser.
n@1118 560
n@1118 561 The test creation tool can help you build a simple test very quickly. By simply selecting your interface and clicking check-boxes you can build a test in minutes.
n@1118 562
n@1118 563 Include audio by dragging and dropping the stimuli you wish to include.
n@1118 564
n@1118 565 The tool examines your XML before exporting to ensure you do not export an invalid XML structure which would crash the test.
n@1118 566
n@1118 567 This guide will help you to construct your own interface on top of the WAET (Web Audio Evaluation Tool) engine. The WAET engine resides in the core.js file, this contains prototype objects to handle most of the test creation, operation and data collection. The interface simply has to link into this at the correct points.
n@1118 568
n@1118 569 \section{Building your own interface}
n@1118 570
n@1118 571 \subsection{Nodes to familiarise}
n@1118 572 Core.js handles several very important nodes which you should become familiar with. The first is the Audio Engine, initialised and stored in variable `AudioEngineContext'. This handles the playback of the web audio nodes as well as storing the `AudioObjects'. The `AudioObjects' are custom nodes which hold the audio fragments for playback. These nodes also have a link to two interface objects, the comment box if enabled and the interface providing the ranking. On creation of an `AudioObject' the interface link will be nulled, it is up to the interface to link these correctly.
n@1118 573
n@1118 574 The specification document will be decoded and parsed into an object called `specification'. This will hold all of the specifications various nodes. The test pages and any pre/post test objects are processed by a test state which will proceed through the test when called to by the interface. Any checks (such as playback or movement checks) are to be completed by the interface before instructing the test state to proceed. The test state will call the interface on each page load with the page specification node.
n@1118 575
n@1118 576 \subsection{Modifying \texttt{core.js}}
n@1118 577 Whilst there is very little code actually needed, you do need to instruct core.js to load your interface file when called for from a specification node. There is a function called `loadProjectSpecCallback' which handles the decoding of the specification and setting any external items (such as metric collection). At the very end of this function there is an if statement, add to this list with your interface string to link to the source. There is an example in there for both the APE and MUSHRA tests already included. Note: Any updates to core.js in future work will most likely overwrite your changes to this file, so remember to check your interface is still here after any update that interferes with core.js.
n@1118 578 Any further files can be loaded here as well, such as css styling files. jQuery is already included.
n@1118 579
n@1118 580 \subsection{Building the Interface}
n@1118 581 Your interface file will get loaded automatically when the `interface' attribute of the setup node matches the string in the `loadProjectSpecCallback' function. The following functions must be defined in your interface file.
n@1118 582 \begin{itemize}
n@1118 583 \item \texttt{loadInterface} - Called once when the document is parsed. This creates any necessary bindings, such as to the metric collection classes and any check commands. Here you can also start the structure for your test such as placing in any common nodes (such as the title and empty divs to drop content into later).
n@1118 584 \item \texttt{loadTest(audioHolderObject)} - Called for each page load. The audioHolderObject contains a specification node holding effectively one of the audioHolder nodes.
n@1118 585 \item \texttt{resizeWindow(event)} - Handle for any window resizing. Simply scale your interface accordingly. This function must be here, but can be an empty function.
n@1118 586 \end{itemize}
n@1118 587
n@1118 588 \subsubsection{loadInterface}
n@1118 589 This function is called by the interface once the document has been parsed since some browsers may parse files asynchronously. The best method is simply to put `loadInterface()' at the top of your interface file, therefore when the JavaScript engine is ready the function is called.
n@1118 590
n@1118 591 By default the HTML file has an element with id ``topLevelBody'' where you can build your interface. Make sure you blank the contents of that object. This function is the perfect time to build any fixed items, such as the page title, session titles, interface buttons (Start, Stop, Submit) and any holding and structure elements for later on.
n@1118 592
n@1118 593 At the end of the function, insert these two function calls: testState.initialise() and testState.advanceState();. This will actually begin the test sequence, including the pre-test options (if any are included in the specification document).
n@1118 594
n@1118 595 \subsubsection{loadTest(audioHolderObject)}
n@1118 596 This function is called on each new test page. It is this function's job to clear out the previous test and set up the new page. Use the function audioEngineContext.newTestPage(); to instruct the audio engine to prepare for a new page. ``audioEngineContext.audioObjects = [];'' will delete any audioObjects, interfaceContext.deleteCommentBoxes(); will delete any comment boxes and interfaceContext.deleteCommentQuestions(); will delete any extra comment boxes specified by commentQuestion nodes.
n@1118 597
n@1118 598 This function will need to instruct the audio engine to build each fragment. Just passing the constructor each element from the audioHolderObject will build the track, audioEngineContext.newTrack(element) (where element is the audioHolderObject audio element). This will return a reference to the constructed audioObject. Decoding of the audio will happen asynchronously.
n@1118 599
n@1118 600 You also need to link audioObject.interfaceDOM with your interface object for that audioObject. The interfaceDOM object has a few default methods. Firstly it must start disabled and become enabled once the audioObject has decoded the audio (function call: enable()). Next it must have a function exportXMLDOM(), this will return the xml node for your interface, however the default is for it to return a value node, with textContent equal to the normalised value. You can perform other functions, but our scripts may not work if something different is specified (as it will breach our results specifications). Finally it must also have a method getValue, which returns the normalised value.
n@1118 601
n@1118 602 It is also the job of the interfaceDOM to call any metric collection functions necessary, however some functions may be better placed outside (for example, the APE interface uses drag and drop, therefore the best way was to call the metric functions from the dragEnd function, which is called when the interface object is dropped). Metrics based upon listening are handled by the audioObject. The interfaceDOM object must manage any movement metrics. For a list of valid metrics and their behaviours, look at the project specification document included in the repository/docs location. The same goes for any checks required when pressing the submit button, or any other method to proceed the test state.
n@1118 603
n@1118 604 \clearpage
n@1118 605 \section{Analysis and diagnostics}
n@1118 606 \subsection{In the browser}
n@1118 607 See `analysis.html' in the main folder: immediate visualisation of (by default) all results in the `saves/' folder.
n@1118 608
n@1118 609 \subsection{Python scripts}
n@1118 610 The package includes Python (2.7) scripts (in `scripts/') to extract ratings and comments, generate visualisations of ratings and timelines, and produce a fully fledged report.
n@1118 611
n@1118 612 Visualisation requires the free matplotlib toolbox (http://matplotlib.org), numpy and scipy.
n@1118 613 By default, the scripts can be run from the `scripts' folder, with the result files in the `saves' folder (the default location where result XMLs are stored). Each script takes the XML file folder as an argument, along with other arguments in some cases.
n@1118 614 Note: to avoid all kinds of problems, please avoid using spaces in file and folder names (this may work on some systems, but others don't like it).
n@1118 615
n@1118 616 \subsubsection{comment\_parser.py}
n@1118 617 Extracts comments from the output XML files corresponding with the different subjects found in `saves/'. It creates a folder per `audioholder'/page it finds, and stores a CSV file with comments for every `audioelement'/fragment within these respective `audioholders'/pages. In this CSV file, every line corresponds with a subject/output XML file. Depending on the settings, the first column containing the name of the corresponding XML file can be omitted (for anonymisation).
n@1118 618 Beware of Excel: sometimes the UTF-8 is not properly imported, leading to problems with special characters in the comments (particularly cumbersome for foreign languages).
n@1118 619
n@1118 620 \subsubsection{evaluation\_stats.py}
n@1118 621 Shows a few statistics of tests in the `saves/' folder so far, mainly for checking for errors. Shows the number of files that are there, the audioholder IDs that were tested (and how many of each separate ID), the duration of each page, the duration of each complete test, the average duration per page, and the average duration in function of the page number.
n@1118 622
n@1118 623 \subsubsection{generate\_report.py}
n@1118 624 Similar to `evaluation\_stats.py', but generates a PDF report based on the output files in the `saves/' folder - or any folder specified as command line argument. Uses pdflatex to write a LaTeX document, then convert to a PDF.
n@1118 625
n@1118 626 \subsubsection{score\_parser.py}
n@1118 627 Extracts rating values from the XML to CSV - necessary for running visualisation of ratings. Creates the folder `saves/ratings/' if not yet created, to which it writes a separate file for every `audioholder'/page in any of the output XMLs it finds in `saves/'. Within each file, rows represent different subjects (output XML file names) and columns represent different `audioelements'/fragments.
n@1118 628
n@1118 629 \subsubsection{score\_plot.py}
n@1118 630 Plots the ratings as stored in the CSVs created by score\_parser.py
n@1118 631 Depending on the settings, it displays and/or saves (in `saves/ratings/') a boxplot, confidence interval plot, scatter plot, or a combination of the aforementioned.
n@1118 632 Requires the free matplotlib library.
n@1118 633 At this point, more than one subject is needed for this script to work.
n@1118 634
n@1118 635 \subsubsection{timeline\_view\_movement.py}
n@1118 636 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows the marker movements of the different fragments, along with when each fragment was played (red regions). Automatically takes fragment names, rating axis title, rating axis labels, and audioholder name from the XML file (if available).
n@1118 637
n@1118 638 \subsubsection{timeline\_view.py} % should be omitted or absorbed by the above soon
n@1118 639 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows when and for how long the subject listened to each of the fragments.
n@1118 640
n@1118 641
n@1118 642
n@1118 643 \clearpage
n@1118 644 \section{Troubleshooting} \label{sec:troubleshooting}
n@1118 645 \subsection{Reporting bugs and requesting features}
n@1118 646 Thanks to feedback from using the interface in experiments by the authors and others, many bugs have been caught and fatal crashes due to the interface seem to be a thing of the past entirely.
n@1118 647
n@1118 648 We continually develop this tool to fix issues and implement features useful to us or our user base. See \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/issues} for a list of feature requests and bug reports, and their status.
n@1118 649
n@1118 650 Please contact the authors if you experience any bugs, if you would like additional functionality, if you spot any errors or gaps in the documentation, if you have questions about using the interface, or if you would like to give any feedback (even positive!) about the interface. We look forward to learning how the tool has (not) been useful to you.
n@1118 651
n@1118 652
n@1118 653 \subsection{First aid}
n@1118 654 Meanwhile, if things do go wrong or the test needs to be interrupted for whatever reason, all data is not lost. In a normal scenario, the test needs to be completed until the end (the final `Submit'), at which point the output XML is stored in the \texttt{saves/}. If this stage is not reached, open the JavaScript Console (see below for how to find it) and type
n@1118 655
n@1118 656 \texttt{createProjectSave()}
n@1118 657
n@1118 658 to present the result XML file on the client side, or
n@1118 659
n@1118 660 \texttt{createProjectSave(specification.projectReturn)}
n@1118 661
n@1118 662 to try to store it to the specified location, e.g. the `saves/' folder on the web server or the local machine (on failure the result XML should be presented directly in the web browser instead)
n@1118 663
n@1118 664 and hit enter. This will open a pop-up window with a hyperlink that reads `Save File'; click it and an XML file with results until that point should be stored in your download folder.
n@1118 665
n@1118 666 Alternatively, a lot of data can be read from the same console, in which the tool prints a lot of debug information. Specifically:
n@1118 667 \begin{itemize}
n@1118 668 \item the randomisation of pages and fragments are logged;
n@1118 669 \item any time a slider is played, its ID and the time stamp (in seconds since the start of the test) are displayed;
n@1118 670 \item any time a slider is dragged and dropped, the location where it is dropped including the time stamp are shown;
n@1118 671 \item any comments and pre- or post-test questions and their answers are logged as well.
n@1118 672 \end{itemize}
n@1118 673
n@1118 674 You can select all this and save into a text file, so that none of this data is lost. You may to choose to do this even when a test was successful as an extra precaution.
n@1118 675
n@1118 676 If you encounter any issue which you believe to be caused by any aspect of the tool, and/or which the documentation does not mention, please do let us know!
n@1118 677
n@1118 678 \subsubsection*{Opening the JavaScript Console}
n@1118 679 \begin{itemize}
n@1118 680 \item In Google Chrome, the JavaScript Console can be found in \textbf{View$>$Developer$>$JavaScript Console}, or via the keyboard shortcut Cmd + Alt + J (Mac OS X).
n@1118 681 \item In Safari, the JavaScript Console can be found in \textbf{Develop$>$Show Error Console}, or via the keyboard shortcut Cmd + Alt + C (Mac OS X). Note that for the Developer menu to be visible, you have to go to Preferences (Cmd + ,) and enable `Show Develop menu in menu bar' in the `Advanced' tab. \textbf{Note that as long as the Developer menu is not visible, nothing is logged to the console, i.e. you will only be able to see diagnostic information from when you switched on the Developer tools onwards.}
n@1118 682 \item In Firefox, go to \textbf{Tools$>$Web Developer$>$Web Console}, or hit Cmd + Alt + K.
n@1118 683 \end{itemize}
n@1118 684
n@1118 685 \subsection{Known issues and limitations}
n@1118 686 \label{sec:knownissues}
n@1118 687
n@1118 688 The following is a non-exhaustive list of problems and limitations you may experience using this tool, due to not being supported yet by us, or by the Web Audio API and/or (some) browsers.
n@1118 689
n@1118 690 \begin{itemize}
n@1118 691 \item Issue \href{https://code.soundsoftware.ac.uk/issues/1463}{\textbf{\#1463}}: \textbf{Firefox} only supports 8 bit and 16 bit WAV files. Pending automatic requantisation (which deteriorates the audio signal's dynamic range to some extent), WAV format stimuli need to adhere to these limitations in order for the test to be compatible with Firefox.
n@1118 692 \item Issues \href{https://code.soundsoftware.ac.uk/issues/1474}{\textbf{\#1474}} and \href{https://code.soundsoftware.ac.uk/issues/1462}{\textbf{\#1462}}: On occasions, audio is not working - or only a continuous `beep' can be heard - notably in \textbf{Safari}. Refreshing, quitting the browser and even enabling Developer tools in Safari's Preferences pane (`Advanced' tab: ``Show `Develop' menu in menu bar'') has helped resolve this. If no (high quality) audio can be heard, make sure your entire playback system's settings are all correct.
n@1118 693 \end{itemize}
n@1118 694
n@1118 695 \clearpage
n@1118 696 \bibliographystyle{ieeetr}
n@1118 697 \bibliography{Instructions}{}
n@1118 698
n@1118 699
n@1118 700 \clearpage
n@1118 701 \appendix
n@1118 702
n@1118 703 \section{Legacy}
n@1118 704 The APE interface and most of the functionality of the first WAET editions are inspired by the APE toolbox for MATLAB \cite{ape}. See \url{https://code.soundsoftware.ac.uk/projects/ape} for the source code and \url{http://brechtdeman.com/publications/aes136.pdf} for the corresponding paper.
n@1118 705
n@1118 706 \clearpage
n@1118 707
n@1118 708 \section{Listening test instructions example}
n@1118 709
n@1118 710 Before each test, show the instructions below or similar and make sure it is available to the subject throughout the test. Make sure to ask whether the participant has any questions upon seeing and/or reading the instructions.
n@1118 711
n@1118 712 \begin{itemize}
n@1118 713 \item You will be asked for your name (``John Smith'') and location (room identifier).
n@1118 714 \item An interface will appear, where you are asked to
n@1118 715 \begin{itemize}
n@1118 716 \item click green markers to play the different mixes;
n@1118 717 \item drag the markers on a scale to reflect your preference for the mixes;
n@1118 718 \item comment on these mixes, using text boxes with corresponding numbers (in your \textbf{native language});
n@1118 719 \item optionally comment on all mixes together, or on the song, in `General comments'.
n@1118 720 \end{itemize}
n@1118 721 \item You are asked for your personal, honest opinion. Feel free to use the full range of the scale to convey your opinion of the various mixes. Don't be afraid to be harsh and direct.
n@1118 722 \item The markers appear at random positions at first (which means some markers may hide behind others).
n@1118 723 \item The interface can take a few seconds to start playback, but switching between mixes should be instantaneous.
n@1118 724 \item This is a research experiment, so please forgive us if things go wrong. Let us know immediately and we will fix it or restart the test.
n@1118 725 \item When the test is finished (after all songs have been evaluated), just call the experimenter, do NOT close the window.
n@1118 726 \item After the test, please fill out our survey about your background, experience and feedback on the test.
n@1118 727 \item By participating, you consent to us using all collected data for research. Unless asked explicitly, all data will be anonymised when shared.
n@1118 728 \end{itemize}
n@1118 729
n@1118 730 \clearpage
n@1118 731
n@1118 732 \section{Terminology} % just to keep track of what exactly we call things. Don't use terms that are too different, to avoid confusion.
n@1118 733 As a guide to better understand the Instructions, and to expand them later, here is a list of terms that may be unclear or ambiguous unless properly defined.
n@1118 734 \begin{description}
n@1118 735 \item[Subject] The word we use for a participant, user, \dots\ of the test, i.e.\ not the experimenter who designs the test but the person who evaluates the audio under test as part of an experiment (or the preparation of one).
n@1118 736 \item[User] The person who uses the tool to configure, run and analyse the test---i.e.\ the experimenter, most likely a researcher---or at least the person administering the test.
n@1118 737 \item[Page] A screen in a test; corresponds with an \texttt{audioholder}.
n@1118 738 \item[Fragment] An element, stimulus or sample in a test; corresponds with an \texttt{audioelement}.
n@1118 739 \item[Test] A complete test which can consist of several pages; corresponds with an entire configuration XML file.
n@1118 740 \item[Configuration XML file] The XML file containing the necessary information on interface, samples, survey questions, configurations, \dots\ which the JavaScript modules read to produce the desired test.
n@1118 741 \item[Results XML file] The output of a successful test, including ratings, comments, survey responses, timing information, and the complete configuration XML file with which the test was generated in the first place.
n@1118 742 \end{description}
n@1118 743
n@1118 744 \clearpage
n@1118 745
n@1118 746 \setcounter{secnumdepth}{0} % don't number this last bit
n@1118 747 \section{Contact details} % maybe add web pages, Twitter accounts, whatever you like
n@1118 748 \label{sec:contact}
n@1118 749
n@1118 750 \begin{itemize}
n@1118 751 \item Nicholas Jillings: \texttt{nicholas.jillings@mail.bcu.ac.uk}
n@1118 752 \item Brecht De Man: \texttt{b.deman@qmul.ac.uk}
n@1118 753 \item David Moffat: \texttt{d.j.moffat@qmul.ac.uk}
n@1118 754 \end{itemize}
n@1118 755
n@1118 756 \end{document}