annotate docs/Instructions/Instructions.tex @ 2450:c602b4c69310

Hotfix: Pseudo failure in pythonServer for 3.x
author Nicholas Jillings <nicholas.jillings@mail.bcu.ac.uk>
date Tue, 02 Aug 2016 10:35:43 +0100
parents e4fcf54abcf5
children
rev   line source
b@1402 1 \documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
b@1402 2 \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
b@1402 3 \geometry{letterpaper} % ... or a4paper or a5paper or ...
b@1402 4 %\geometry{landscape} % Activate for rotated page geometry
b@1402 5 \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
b@1402 6 \usepackage{graphicx} % Use pdf, png, jpg, or eps with pdflatex; use eps in DVI mode
b@1402 7 % TeX will automatically convert eps --> pdf in pdflatex
b@1402 8
b@1402 9 \usepackage{listings} % Source code
b@1435 10 \usepackage{xcolor} % colour (source code for instance)
b@1435 11 \definecolor{grey}{rgb}{0.1,0.1,0.1}
b@1435 12 \definecolor{darkblue}{rgb}{0.0,0.0,0.6}
b@1435 13 \definecolor{cyan}{rgb}{0.0,0.6,0.6}
b@1435 14
b@1402 15 \usepackage{amssymb}
b@1402 16 \usepackage{cite}
b@1402 17 \usepackage{hyperref} % Hyperlinks
b@1402 18 \usepackage[nottoc,numbib]{tocbibind} % 'References' in TOC
b@2233 19 \usepackage{url}
b@1402 20
b@1402 21 \graphicspath{{img/}} % Relative path where the images are stored.
b@1402 22
b@1402 23 \title{Instructions for \\ Web Audio Evaluation Tool}
b@1402 24 \author{Nicholas Jillings, Brecht De Man and David Moffat}
b@2209 25 %\date{7 December 2015} % Activate to display a given date or no date
b@1402 26
b@1402 27 \begin{document}
b@1402 28 \maketitle
b@1402 29
b@1435 30 These instructions are about use of the Web Audio Evaluation Tool on Windows and Mac OS X platforms.
b@1435 31
b@1435 32 We request that you acknowledge the authors and cite our work when using it \cite{waet}, see also CITING.txt.
b@1435 33
b@2209 34 The tool is available for academic use in its entirety including source code on \url{https://github.com/BrechtDeMan/WebAudioEvaluationTool}, under the GNU General Public License v3.0 (\url{http://choosealicense.com/licenses/gpl-3.0/}), see also LICENSE.txt.
b@2201 35
b@2201 36 The SoundSoftware project page, including a Mercurial repository, is \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/}.
b@1435 37
b@2429 38 \textbf{The most current version of these instructions can be found on \url{https://github.com/BrechtDeMan/WebAudioEvaluationTool/wiki}.}
b@2429 39
b@1402 40
b@1402 41 \tableofcontents
b@1402 42
b@1402 43 \clearpage
b@1402 44
b@1402 45 \section{Installation}
b@2233 46 \label{sec:installation}
b@2233 47 Download the folder (\url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool/archive/tip.zip}) and unzip in a location of your choice, or pull the source code from \url{https://github.com/BrechtDeMan/WebAudioEvaluationTool.git} (git) or \url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool} (Mercurial).
b@1402 48
b@1402 49 \subsection{Contents}
b@1402 50 The folder should contain the following elements: \\
b@1402 51
b@1402 52 \textbf{Main folder:}
b@1402 53 \begin{itemize}
b@2231 54 \item \texttt{CITING.txt, LICENSE.txt, README.md}: text files with, respectively, the citation which we ask to include in any work where this tool or any portion thereof is used, modified or otherwise; the license under which the software is shared; and a general readme file.
b@2231 55 \item \texttt{demo.html}: Several demonstrations of listening tests, using examples from the example\_eval folder
b@2231 56 \item \texttt{index.html}: webpage where interface should appear (append link to configuration XML, e.g. index.html?url=config.xml)
b@2231 57 \item \texttt{pythonServer.py}: webserver for running tests locally
b@2231 58 \item \texttt{pythonServer-legacy.py}: webserver with limited functionality (no automatic storing of output XML files)
b@2231 59 \\
b@2203 60 \end{itemize}
b@2209 61 \textbf{Analysis of results (\texttt{./analysis/})}
b@2231 62 \begin{itemize}
b@2231 63 \item \texttt{analyse.html}: analysis and diagnostics of a set of result XML files (legacy)
b@2231 64 \item \texttt{analysis.css}: analysis page style file
b@2231 65 \item \texttt{analysis.js}: analysis functions
b@2231 66 \item \texttt{index.html}: web page where analysis of stored results can be performed
b@1402 67 \end{itemize}
b@2231 68 \textbf{CSS files (\texttt{./css/})}
b@2231 69 \begin{itemize}
b@2231 70 \item \texttt{core.css}: core style file (edit to change appearance)
b@1402 71 \end{itemize}
b@2231 72 \textbf{Documentation (\texttt{./docs/})}
b@2231 73 \begin{itemize}
b@2231 74 \item AESPosterComp: PDF and \LaTeX source of Audio Engineering Society UK Sustaining Members event at Solid State Logic, Begbroke
b@2231 75 \item \href{http://c4dm.eecs.qmul.ac.uk/dmrn/events/dmrnp10/#posters}{DMRN+10}: PDF and \LaTeX source of poster for 10\textsuperscript{th} Digital Music Research Network One-Day workshop (``soft launch'')
b@2231 76 \item Instructions: PDF and \LaTeX source of these instructions
b@2231 77 \item Project Specification Document (\LaTeX/PDF)
b@2231 78 \item Results Specification Document (\LaTeX/PDF)
b@2231 79 \item SMC15: PDF and \LaTeX source of 12th Sound and Music Computing Conference paper \cite{waet}
b@2231 80 \item WAC2016: PDF and \LaTeX source of 2nd Web Audio Conference paper \cite{waetwac}
b@2231 81 \item WAC2016Poster: PDF and \LaTeX source of 2nd Web Audio Conference poster\\
b@2231 82 \end{itemize}
b@2231 83 \textbf{Interface files (\texttt{./interfaces/})}
b@2231 84 \begin{itemize}
b@2231 85 \item Each interface class has a JavaScript file and an optional CSS style file. These are loaded as needed.
b@2231 86 \end{itemize}
b@2231 87 \textbf{JavaScript code (\texttt{./js/})}
b@2231 88 \begin{itemize}
b@2231 89 \item \texttt{core.js}: JavaScript file with core functionality
b@2231 90 \item \texttt{jquery-2.1.4.js}: jQuery JavaScript Library
b@2231 91 \item \texttt{loudness.js}: Allows for automatic calculation of loudness of Web Audio API Buffer objects, return gain values to correct for a target loudness or match loudness between multiple objects
b@2231 92 \item \texttt{specification.js}: decodes configuration XML to JavaScript object
b@2231 93 \item \texttt{WAVE.js}: decodes and performs WAVE file byte level manipulation
b@2231 94 \item \texttt{xmllint.js}: XML validation
b@2231 95 \end{itemize}
b@2231 96 \textbf{Media files (\texttt{./media/})}
b@2231 97 \begin{itemize}
b@2231 98 \item \texttt{example}: contains example audio files 0.wav-10.wav which are short recordings at 44.1kHz, 16bit of a woman saying the corresponding number (useful for testing randomisation and general familiarisation with the interface).
b@2231 99 \end{itemize}
b@2231 100 \textbf{PHP scripts (\texttt{./php/})}
b@2231 101 \begin{itemize}
b@2231 102 \item \texttt{keygen.php}: generates a unique file name for saved results
b@2231 103 \item \texttt{pseudo.php}: allows for pseudo-random selection from a range of configuration XML files
b@2231 104 \item \texttt{save.php}: PHP script to store result XML files to web server
b@2231 105 \item PHP analysis scripts % ELABORATE
b@2231 106 \end{itemize}
b@2264 107 \textbf{Python scripts (\texttt{./python/})}
b@2264 108 \begin{itemize}
b@2264 109 \item Helpful Python and PHP scripts for extraction and visualisation of data.\\
b@2264 110 \end{itemize}
b@2231 111 \textbf{Output files (\texttt{./saves/})}
b@2231 112 \begin{itemize}
b@2231 113 \item The output XML files of tests will be stored here by default by the \texttt{pythonServer.py} script.\\
b@2231 114 \end{itemize}
b@2231 115 \textbf{Test creation tool (\texttt{./test\_create/})}
b@2231 116 \begin{itemize}
b@2231 117 \item Webpage for easily setting up your own test without having to delve into the XML.\\
b@2231 118 \end{itemize}
b@2231 119 \textbf{Tests (\texttt{./tests/})}
b@2231 120 \begin{itemize}
b@2231 121 \item This is where you can store your configuration XML files.
b@2231 122 \item Contains a folder with examples.\\ % ELABORATE
b@2231 123 \end{itemize}
b@2231 124 \textbf{XML specifications (\texttt{./xml/})}
b@2231 125 \begin{itemize}
b@2231 126 \item \texttt{scaledefinitions.xml}: marker text and positions for various scales
b@2231 127 \item \texttt{test-schema.xsd}: definition of configuration and result XML file structure\\ % ELABORATE
b@2231 128 \end{itemize}
b@2231 129
b@2231 130 % \textbf{Example project (\texttt{./example\_eval/})}
b@2231 131 % \begin{itemize}
b@2231 132 % \item An example of what the set up XML should look like,
b@2231 133 % \end{itemize}
b@1402 134
b@1435 135 \subsection{Compatibility}
b@1402 136 As Microsoft Internet Explorer doesn't support the Web Audio API\footnote{\url{http://caniuse.com/\#feat=audio-api}}, you will need another browser like Google Chrome, Safari or Firefox (all three are tested and confirmed to work).
b@1435 137
nicholas@2235 138 %Firefox does not currently support other bit depths than 8 or 16 bit for PCM wave files. In the future, this will throw a warning message to tell the user that their content is being quantised automatically. %Nick? Right? To be removed if and when actually implemented
nicholas@2235 139 % REPLY: Brecht, implemented our own in WAVE.js. Firefox have said they will support all bit-depth in the future.
b@1402 140
nicholas@2235 141 The tool is platform-independent and works in any browser that supports the Web Audio API. It does not require any specific, proprietary software. However, in case the tool is hosted locally (i.e. you are not hosting it on an actual webserver) you will need Python (2.7 or 3.x), which is a free programming language - see the next paragraph.
b@1402 142
b@1435 143 \clearpage
b@1402 144
b@2233 145 \section{Quick start}
b@2233 146 This document aims to provide an overview of all features and how to use them. However, if you are just trying out this tool, or you need to put together a test very quickly, or you simply don't want to read through all the details first, this section gives you the bare necessities to put together a simple listening test very quickly.
b@2233 147
b@2233 148 \begin{itemize} % WIP
b@2233 149 \item Download the tool (see Section~\ref{sec:installation})
b@2233 150 \item Copy the tool to a PHP-enabled web server if you have access to one.
nicholas@2235 151 \item Go to \path{test_create.html} and configure your test.
nicholas@2235 152 \item Save your test file in the folder \path{./tests/}.
nicholas@2235 153 \item Your test will be live at \path{[web server address]/index.html?url=tests/[testname].xml}. If you are not using a web server, you can simulate one locally by running
b@2264 154 \path{python/pythonServer.py} (requires Python), after which you can access the test at \\ % hack
nicholas@2235 155 \path{http://localhost:8000/index.html?url=tests/[testname].xml}
b@2233 156 \end{itemize}
b@2233 157
b@2233 158 \clearpage
b@1402 159
b@2209 160 \section{Test setup} % TO DO: Linux (Android, iOS)
b@1402 161
b@1402 162 \subsection{Sample rate}
b@1402 163 Depending on how the experiment is set up, audio is resampled automatically (the Web Audio default) or the sample rate is enforced. In the latter case, you will need to make sure that the sample rate of the system is equal to the sample rate of these audio files. For this reason, all audio files in the experiment will have to have the same sample rate.
b@1402 164
b@1402 165 Always make sure that all other digital equipment in the playback chain (clock, audio interface, digital-to-analog converter, ...) is set to this same sample rate.
b@1402 166
b@1402 167 Note that upon changing the sampling rate, the browser will have to be restarted for the change to take effect.
b@1402 168
b@1402 169 \subsubsection{Mac OS X}
b@1402 170 To change the sample rate in Mac OS X, go to \textbf{Applications/Utilities/Audio MIDI Setup} or find this application with Spotlight (see Figure \ref{fig:audiomidisetup}). Then select the output of the audio interface you are using and change the `Format' to the appropriate number. Also make sure the bit depth and channel count are as desired.
b@1402 171 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
b@1402 172
b@1402 173 Also make sure left and right channel gains are equal, as some applications alter this without changing it back, leading to a predominantly louder left or right channel. See Figure \ref{fig:audiomidisetup} for an example where the channel gains are different.
b@1402 174
b@1402 175 \begin{figure}[tb]
b@1402 176 \centering
b@1402 177 \includegraphics[width=.65\textwidth]{img/audiomidisetup.png}
b@1402 178 \caption{The Audio MIDI Setup window in Mac OS X}
b@1402 179 \label{fig:audiomidisetup}
b@1402 180 \end{figure}
b@1402 181
b@1402 182 \subsubsection{Windows}
b@1402 183 To change the sample rate in Windows, right-click on the speaker icon in the lower-right corner of your desktop and choose `Playback devices'. Right-click the appropriate playback device and click `Properties'. Click the `Advanced' tab and verify or change the sample rate under `Default Format'. % NEEDS CONFIRMATION
nicholas@2235 184 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
b@1402 185
b@1402 186 \subsection{Local test}
b@1402 187 If the test is hosted locally, you will need to run the local webserver provided with this tool.
b@1402 188
nickjillings@1446 189 \subsubsection{Mac OS X \& Linux}
b@1402 190
nickjillings@1446 191 On Mac OS X, Python comes preinstalled, as with most Unix/Linux distributions.
b@1402 192
nicholas@2235 193 Open the Terminal (find it in \textbf{Applications/Terminal} or via Spotlight), and go to the folder you downloaded. To do this, type \texttt{cd [folder]}, where \texttt{[folder]} is the folder where to find the \texttt{pythonServer.py} script you downloaded. For instance, if the location is \texttt{/Users/John/Documents/WebAudioEvaluationToolbox/}, then type
b@1402 194
nicholas@2235 195 \texttt{cd /Users/John/Documents/WebAudioEvaluationToolbox/}
b@1402 196
b@1402 197 Then hit enter and run the Python script by typing
b@1402 198
b@2264 199 \texttt{python python/pythonServer.py}
b@1402 200
b@1402 201 and hit enter again. See also Figure \ref{fig:terminal}.
b@1402 202
b@1402 203 \begin{figure}[htbp]
b@1402 204 \begin{center}
b@1402 205 \includegraphics[width=.75\textwidth]{pythonServer.png}
b@1402 206 \caption{Mac OS X: The Terminal window after going to the right folder (\texttt{cd [folder\_path]}) and running \texttt{pythonServer.py}.}
b@1402 207 \label{fig:terminal}
b@1402 208 \end{center}
b@1402 209 \end{figure}
b@1402 210
b@2365 211 Alternatively, you can simply type \texttt{python} (followed by a space) and drag the file into the Terminal window from Finder. % DOESN'T WORK YET
b@1402 212
nickjillings@1446 213 You can leave this running throughout the different experiments (i.e. leave the Terminal open). Once running, the terminal will report the current URL to type into your browser to initiate the test; usually this is \url{http://localhost:8000/}.
nicholas@2235 214 On OS X 10.10 or newer, you may get a dialogue asking if Python can accept incoming connections; click Yes.
b@1402 215
b@1402 216 To start the test, open the browser and type
b@1402 217
b@1402 218 \texttt{localhost:8000}
b@1402 219
b@1402 220 and hit enter. The test should start (see Figure \ref{fig:test}).
b@1402 221
b@1402 222 To quit the server, either close the terminal window or press Ctrl+C on your keyboard to forcibly shut the server.
b@1402 223
b@1402 224 \subsubsection{Windows}
b@1402 225
nicholas@2235 226 On Windows, Python is not generally preinstalled and therefore has to be downloaded\footnote{\url{https://www.python.org/downloads/windows/}} and installed to be able to run scripts such as the local webserver, necessary if the tool is hosted locally.
b@1402 227
nicholas@2235 228 Once installed, simply double click the Python script \texttt{pythonServer.py} in the folder you downloaded.
b@1402 229
b@1402 230 You may see a warning like the one in Figure \ref{fig:warning}. Click `Allow access'.
b@1402 231
b@1402 232 \begin{figure}[htbp]
b@1402 233 \begin{center}
b@1402 234 \includegraphics[width=.6\textwidth]{warning.png}
b@1402 235 \caption{Windows: Potential warning message when executing \texttt{pythonServer.py}.}
b@1402 236 \label{fig:warning}
b@1402 237 \end{center}
b@1402 238 \end{figure}
b@1402 239
b@1402 240 The process should now start, in the Command prompt that opens - see Figure \ref{fig:python}.
b@1402 241
b@1402 242 \begin{figure}[htbp]
b@1402 243 \begin{center}
b@1402 244 \includegraphics[width=.75\textwidth]{python.png}
b@1402 245 \caption{Windows: The Command Prompt after running \texttt{pythonServer.py} and opening the corresponding website.}
b@1402 246 \label{fig:python}
b@1402 247 \end{center}
b@1402 248 \end{figure}
b@1402 249
b@1402 250 You can leave this running throughout the different experiments (i.e. leave the Command Prompt open).
b@1402 251
b@1402 252 To start the test, open the browser and type
b@1402 253
b@1402 254 \texttt{localhost:8000}
b@1402 255
b@1402 256 and hit enter. The test should start (see Figure \ref{fig:test}).
b@1402 257
b@1402 258 \begin{figure}[htb]
b@1402 259 \begin{center}
b@1402 260 \includegraphics[width=.8\textwidth]{test.png}
b@1402 261 \caption{The start of the test in Google Chrome on Windows 7.}
b@1402 262 \label{fig:test}
b@1402 263 \end{center}
b@1402 264 \end{figure}
b@1402 265
b@2365 266 If at any point in the test the participant reports weird behaviour or an error of some kind, or the test needs to be interrupted, please notify the experimenter and/or refer to Section~\ref{sec:troubleshooting}.
b@2365 267
b@2365 268 When the test is over (the subject should see a message to that effect), the output XML file containing all collected data should have appeared in `saves/'. The names of these files are `test-0.xml', `test-1.xml', etc., in ascending order. The Terminal or Command prompt running the local web server will display the following file name. If such a file did not appear, please again refer to Section~\ref{sec:troubleshooting}. % Is this still the case?
b@2365 269
b@2365 270 It is advised that you back up these results as often as possible, as a loss of this data means that the time and effort spent by the subject(s) has been in vain. Save the results to an external or network drive, and/or send them to the experimenter regularly.
b@2365 271
b@2365 272 To start the test again for a new participant, you do not need to close the browser or shut down the Terminal or Command Prompt. Simply refresh the page or go to \texttt{localhost:8000} again, a new session will be created.
b@1402 273
b@1402 274
b@1402 275 \subsection{Remote test}
b@2365 276 Put all files on a web server which supports PHP. This allows the `save.php' script to store the XML result files in the `saves/' folder.
nicholas@2235 277
nicholas@2235 278 Ensure that the \path{saves/} directory has public read-write access. On most linux servers this can be achieved using the command \texttt{sudo chmod 777 ./saves}.
b@1435 279
b@1435 280 Make sure the \texttt{projectReturn} attribute of the \texttt{setup} node is set to the \texttt{save.php} script.
b@1435 281
b@2386 282 Then, just go to the URL of the corresponding HTML file, e.g. \url{http://server.com/path/to/WAET/index.html?url=test/my-test.xml}. If storing on the server doesn't work at submission (e.g. if the \texttt{projectReturn} attribute isn't properly set or PHP does not have the correct permissions), the result XML file will be presented to the subject on the client side, as a `Save file' link.
b@1435 283
nickjillings@1363 284 \subsection{Load a test / Multiple test documents}
b@2386 285 By default the \texttt{index.html} page will load an empty page. To automatically load a test document, you need to append the location in the URL. If your URL is normally \url{http://localhost:8000/index.html} you would append the following: \url{?url=/path/to/your/test.xml}. Replace the fields with your actual path; the path is local to the running directory, so if you have your test in the directory \texttt{example\_eval} called \texttt{project.xml} you would append \url{?url=/example\_eval/project.xml}.
b@1435 286
b@1402 287 \clearpage
b@1435 288
b@1435 289 \section{Interfaces}
b@1435 290
b@1435 291 The Web Audio Evaluation Tool comes with a number of interface styles, each of which can be customised extensively, either by configuring them differently using the many optional features, or by modifying the JavaScript files.
b@1435 292
nickjillings@1363 293 To set the interface style for the whole test, set the attribute of the \texttt{setup} node to \texttt{interface="APE"}, where \texttt{"APE"} is one of the interface names below.
b@1435 294
b@2209 295 \subsection{Templates}
b@2209 296 This section describes the different templates available in the Interfaces folder (\texttt{./interfaces}).
b@1435 297
b@2209 298 \begin{description}
b@2209 299 \item[Blank] Use this template to start building your own, custom interface (JavaScript and CSS).
b@2209 300
b@2209 301 \item[AB] Performs a pairwise comparison, but supports n-way comparison (in the example we demonstrate it performing a 7-way comparison).
b@2209 302
b@2209 303 \item[ABX] Like AB, but with an unknown sample X which has to be identified as being either A or B.
b@2209 304
b@2209 305 \item[APE] The APE interface is based on \cite{ape}, and consists of one or more axes, each corresponding with an attribute to be rated, on which markers are placed. As such, it is a multiple stimulus interface where (for each dimension or attribute) all elements are on one axis so that they can be maximally compared against each other, as opposed to rated individually or with regards to a single reference.
b@2209 306 It also contains an optional text box for each element, to allow for clarification by the subject, tagging, and so on.
b@2209 307
b@2209 308 \item[Discrete] Each audio element is given a discrete set of values based on the number of slider options specified. For instance, Likert specifies 5 values and therefore each audio element must be one of those 5 values.
b@2209 309
b@2209 310 \item[Horizontal sliders] Creates the same interfaces as MUSHRA except the sliders are horizontal, not vertical.
b@2209 311
b@2209 312 \item[MUSHRA] This is a straightforward implementation of \cite{mushra}, especially common for the rating of audio quality, for instance for the evaluation of audio codecs. This can also operate any vertical slider style test and does not necessarily have to match the MUSHRA specification.
b@2209 313 \end{description}
b@2209 314
b@2209 315
b@2209 316 \subsection{Examples}
b@2209 317 Below are a number of established interface types, which are all supported using the templates from the previous section. % Confirm?
b@2209 318 From \cite{waetwac}.
b@2209 319
b@2209 320 % TODO: add labels like (\textbf{\texttt{horizontal-sliders}}) to show which type of interface can be created using which template
b@2273 321
b@2209 322 \begin{itemize}
b@2209 323 \item AB Test / Pairwise comparison~\cite{lipshitz1981great,david1963method}: Two stimuli presented simultaneously, participant selects a preferred stimulus.
b@2209 324 \item ABC/HR (ITU-R BS. 1116)~\cite{recommendation19971116} (Mean Opinion Score: MOS): each stimulus has a continuous scale (5-1), labeled as Imperceptible, Perceptible but not annoying, Slightly annoying, Annoying, Very annoying.
b@2209 325 \item -50 to 50 Bipolar with Ref: each stimulus has a continuous scale -50 to 50 with default values as 0 in middle and a reference.
b@2209 326 \item Absolute Category Rating (ACR) Scale~\cite{rec1996p}: Likert but labels are Bad, Poor, Fair, Good, Excellent
b@2209 327 \item ABX Test~\cite{clark1982high}: Two stimuli are presented along with a reference and the participant has to select a preferred stimulus, often the closest to the reference.
b@2273 328 \item APE~\cite{ape}: Multiple stimuli on one or more axes for inter-sample rating.
b@2209 329 %\item APE style 2D \cite{ape}: Multiple stimuli on a 2D plane for inter-sample rating (e.g. Valence Arousal). % TO BE IMPLEMENTED
b@2209 330 \item Comparison Category Rating (CCR) Scale~\cite{rec1996p}: ACR \& DCR but 7 point scale, with reference: Much better, Better, Slightly better, About the same, Slightly worse, Worse, Much worse.
b@2209 331 \item Degradation Category Rating (DCR) Scale~\cite{rec1996p}: ABC \& Likert but labels are (5) Inaudible, (4) Audible but not annoying, (3) Slightly annoying, (2) Annoying, (1) Very annoying.
b@2209 332 \item ITU-R 5 Point Continuous Impairment Scale~\cite{rec1997bs}: Same as ABC/HR but with a reference.
b@2209 333 \item Likert scale~\cite{likert1932technique}: each stimulus has a five point scale with values: Strongly agree, Agree, Neutral, Disagree and Strongly disagree.
b@2209 334 \item MUSHRA (ITU-R BS. 1534)~\cite{recommendation20031534} Multiple stimuli are presented and rated on a continuous scale, which includes a reference, hidden reference and hidden anchors.
b@2209 335 \item Pairwise Comparison (Better/Worse)~\cite{david1963method}: every stimulus is rated as being either better or worse than the reference.
b@2209 336 \item Rank Scale~\cite{pascoe1983evaluation}: stimuli ranked on single horizontal scale, where they are ordered in preference order.
b@2209 337 \item 9 Point Hedonic Category Rating Scale~\cite{peryam1952advanced}: each stimulus has a nine point scale with values: Like extremely, Like very much, Like moderate, Like slightly, Neither like nor dislike, Dislike extremely, Dislike very much, Dislike moderate, Dislike slightly. There is also a provided reference.
b@2209 338 \end{itemize}
b@2209 339
b@2273 340
b@2209 341 \subsection{Building your own interface}
b@2209 342
b@2209 343 \subsubsection{Nodes to familiarise}
b@2209 344 Core.js handles several very important nodes which you should become familiar with. The first is the Audio Engine, initialised and stored in variable `AudioEngineContext'. This handles the playback of the web audio nodes as well as storing the `AudioObjects'. The `AudioObjects' are custom nodes which hold the audio fragments for playback. These nodes also have a link to two interface objects, the comment box if enabled and the interface providing the ranking. On creation of an `AudioObject' the interface link will be nulled, it is up to the interface to link these correctly.
b@2209 345
b@2209 346 The specification document will be decoded and parsed into an object called `specification'. This will hold all of the specifications various nodes. The test pages and any pre/post test objects are processed by a test state which will proceed through the test when called to by the interface. Any checks (such as playback or movement checks) are to be completed by the interface before instructing the test state to proceed. The test state will call the interface on each page load with the page specification node.
b@2209 347
b@2209 348 \subsubsection{Modifying \texttt{core.js}}
b@2209 349 Whilst there is very little code actually needed, you do need to instruct core.js to load your interface file when called for from a specification node. There is a function called `loadProjectSpecCallback' which handles the decoding of the specification and setting any external items (such as metric collection). At the very end of this function there is an if statement, add to this list with your interface string to link to the source. There is an example in there for both the APE and MUSHRA tests already included. Note: Any updates to core.js in future work will most likely overwrite your changes to this file, so remember to check your interface is still here after any update that interferes with core.js.
b@2209 350 Any further files can be loaded here as well, such as css styling files. jQuery is already included.
b@2209 351
b@2209 352 \subsubsection{Building the Interface}
nicholas@2235 353 Your interface file will get loaded automatically when the `interface' attribute of the setup node matches the string in the `loadProjectSpecCallback' function. The following functions must be defined in your interface file. A template file is provided in \path{interfaces\blank.js}.
b@2209 354 \begin{itemize}
b@2209 355 \item \texttt{loadInterface} - Called once when the document is parsed. This creates any necessary bindings, such as to the metric collection classes and any check commands. Here you can also start the structure for your test such as placing in any common nodes (such as the title and empty divs to drop content into later).
b@2209 356 \item \texttt{loadTest(audioHolderObject)} - Called for each page load. The audioHolderObject contains a specification node holding effectively one of the audioHolder nodes.
b@2209 357 \item \texttt{resizeWindow(event)} - Handle for any window resizing. Simply scale your interface accordingly. This function must be here, but can be an empty function.
b@2209 358 \end{itemize}
b@2209 359
b@2209 360 \textbf{loadInterface}\\
b@2209 361 This function is called by the interface once the document has been parsed since some browsers may parse files asynchronously. The best method is simply to put `loadInterface()' at the top of your interface file, therefore when the JavaScript engine is ready the function is called.
b@2209 362
b@2209 363 By default the HTML file has an element with id ``topLevelBody'' where you can build your interface. Make sure you blank the contents of that object. This function is the perfect time to build any fixed items, such as the page title, session titles, interface buttons (Start, Stop, Submit) and any holding and structure elements for later on.
b@2209 364
b@2209 365 At the end of the function, insert these two function calls: testState.initialise() and testState.advanceState();. This will actually begin the test sequence, including the pre-test options (if any are included in the specification document).
b@2209 366
b@2209 367 \textbf{loadTest(audioHolderObject)}\\
b@2209 368 This function is called on each new test page. It is this function's job to clear out the previous test and set up the new page. Use the function audioEngineContext.newTestPage(); to instruct the audio engine to prepare for a new page. ``audioEngineContext.audioObjects = [];'' will delete any audioObjects, interfaceContext.deleteCommentBoxes(); will delete any comment boxes and interfaceContext.deleteCommentQuestions(); will delete any extra comment boxes specified by commentQuestion nodes.
b@2209 369
b@2209 370 This function will need to instruct the audio engine to build each fragment. Just passing the constructor each element from the audioHolderObject will build the track, audioEngineContext.newTrack(element) (where element is the audioHolderObject audio element). This will return a reference to the constructed audioObject. Decoding of the audio will happen asynchronously.
b@2209 371
b@2209 372 You also need to link audioObject.interfaceDOM with your interface object for that audioObject. The interfaceDOM object has a few default methods. Firstly it must start disabled and become enabled once the audioObject has decoded the audio (function call: enable()). Next it must have a function exportXMLDOM(), this will return the xml node for your interface, however the default is for it to return a value node, with textContent equal to the normalised value. You can perform other functions, but our scripts may not work if something different is specified (as it will breach our results specifications). Finally it must also have a method getValue, which returns the normalised value.
b@2209 373
b@2209 374 It is also the job of the interfaceDOM to call any metric collection functions necessary; however, some functions may be better placed outside (for example, the APE interface uses drag and drop, therefore the best way was to call the metric functions from the dragEnd function, which is called when the interface object is dropped). Metrics based upon listening are handled by the audioObject. The interfaceDOM object must manage any movement metrics. For a list of valid metrics and their behaviours, look at the project specification document included in the repository/docs location. The same goes for any checks required when pressing the submit button, or any other method to proceed the test state.
b@1435 375
b@1402 376
b@1435 377 \clearpage
b@1435 378
nickjillings@1363 379 \section{Project XML}
nickjillings@1363 380
nicholas@2235 381 Each test is defined by its project XML file, examples of these can be seen in the ./tests/examples/ directory.
nickjillings@1363 382
nickjillings@1363 383 In the XML there are several nodes which must be defined:
nickjillings@1363 384 \begin{itemize}
nickjillings@1363 385 \item \texttt{<waet>}: The root node.
nickjillings@1363 386 \item \texttt{<setup>}: The first child node, defines whole-test parameters
nicholas@2235 387 \item \texttt{<page>}: Specifies a test page, attached \emph{after} the \texttt{<setup>} nodes.
nickjillings@1363 388 \item \texttt{<audioelement>}: Specifies an audio element.
nickjillings@1363 389 \end{itemize}
nickjillings@1363 390
nicholas@2235 391 The test uses XML validation, so the ordering of nodes is important to pass this validation. Some nodes also have specific attributes which must be set and may even have a certain format to apply them. This is done so error checking can be performed to catch easy to find errors before loading and running a test session. If your project XML fails this validation, all the errors will be listed.
nickjillings@1363 392
nickjillings@1363 393 Before identifying any features, this part will walk you through the available nodes, their function and their attributes.
nickjillings@1363 394
nickjillings@1363 395 \subsection{Root}
nickjillings@1363 396 The root node is \texttt{<waet>}, it must have the following attributes:
nickjillings@1363 397
nickjillings@1363 398 \texttt{xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"}
nickjillings@1363 399
nickjillings@1363 400 \texttt{xsi:noNamespaceSchemaLocation="test-schema.xsd"}.
nickjillings@1363 401
nickjillings@1363 402 This will ensure it is checked against the XML schema for validation.
nickjillings@1363 403
nickjillings@1363 404 \subsection{Set up}
nickjillings@1363 405 The first child node, \texttt{<setup>} specifies any one time and global parameters. It takes the following attributes:
nickjillings@1363 406 \begin{itemize}
nickjillings@1363 407 \item \texttt{interface}: String, mandatory, specifies the interface to load
b@2103 408 \item \texttt{projectReturn}: URL, mandatory, specifies the return point. Can be a 3rd party server or the local server. Set to null to disable automatic saving. Specifying ``save.php'' will trigger the return if either the PHP or python servers are used. On error, it will always default to presenting the save on page.
b@2386 409 \item \texttt{returnURL}: Upon successful completion and submission of the test, this URL will be opened. This can be a presentation of the results thus far, some type of reward, or a page with links to other tests.
nickjillings@1363 410 \item \texttt{randomiseOrder}: Boolean, optional, if true it will randomise the order of the test pages. Default is false.
nicholas@2235 411 \item \texttt{poolSize}: non-negative integer, optional. Specifies the number of test pages to actually test with. Combined with randomiseOrder being true will give a random set of test pages per participant from the given pool of \texttt{<page>} nodes. Specifying 0 disables this option, default is 0.
b@2209 412 \item \texttt{loudness}: non-positive integer, optional. Set the default LUFS target value. See Section~\ref{sec:loudness} for more.
b@2209 413 \item \texttt{sampleRate}: positive integer, optional. If set, the sample rate reported by the Web Audio API must match this number. See Section~\ref{sec:samplerate}.
nicholas@2235 414 \item \texttt{calibration}: boolean, optional. If true, a simple hearing test is presented to the user to gather system frequency response (DAC, listening device and subject hearing). Default is false.
nicholas@2235 415 \item \texttt{crossFade}: decimal greater than or equal to 0.0, optional. Define the crossFade between fragments when clicked in seconds. Default is 0.0s.
nicholas@2235 416 \item \texttt{preSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to all elements in the test at the beginning of the buffer in seconds. Default is 0.0s
nicholas@2235 417 \item \texttt{postSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to all elements in the test at the end of the buffer in seconds. Default is 0.0s
nickjillings@1363 418 \end{itemize}
nickjillings@1363 419
nickjillings@1363 420 The \texttt{<setup>} node takes the following child nodes, note these must appear in this order:
nickjillings@1363 421 \begin{itemize}
b@2209 422 \item \texttt{<survey>}: Min of 0, max of 2 occurrences. See Section~\ref{sec:survey}.
nickjillings@1363 423 \item \texttt{<metric>}: Must appear only once.
nickjillings@1363 424 \item \texttt{<interface>}: Must appear only once.
nickjillings@1363 425 \end{itemize}
nickjillings@1363 426
nickjillings@1363 427 \subsection{Page}
nickjillings@1365 428 \label{sec:page}
nickjillings@1363 429 The only other first level child nodes, these specify the test pages. It takes the following attributes:
nickjillings@1363 430 \begin{itemize}
nickjillings@1363 431 \item \texttt{id}: ID, mandatory. A string which must be unique across the entire XML. It is used to identify the page on test completion as pages are returned in the results in the order they appeared, not the order specified.
b@2103 432 \item \texttt{hostURL}: URL, mandatory. Used in conjunction with the \texttt{<audioelement>} url to specify where the audio files are located. For instance if all your files are in the directory \texttt{./test/} you can set this attribute to ``/test/'' and the \texttt{<audioelement>} url attribute only needs the file name. Set to ``'' if no hostURL prefix desired.
b@2209 433 \item \texttt{randomiseOrder}: Boolean, optional. If true the audio fragments are presented randomly rather than the order specified. See Section~\ref{sec:randomisation}. Default is false.
nickjillings@1363 434 \item \texttt{repeatCount}: non-negative integer, optional. Specify the number of times to repeat the test page (re-present). Each presentation will appear as an individual page in the results. Default is 0.
nickjillings@1363 435 \item \texttt{loop}: Boolean, optional. If true, the audio elements will loop synchronously with each other. See Section~\ref{sec:looping}. Default is false.
b@2209 436 \item \texttt{loudness}: non-positive integer, optional. Set the LUFS target value for this page. Supersedes the \texttt{<setup>} loudness attribute for this page. See Section~\ref{sec:loudness} for more.
nicholas@2235 437 \item \texttt{label}: enumeration, optional. Set the label to one of the following
nicholas@2235 438 \begin{itemize}
nicholas@2235 439 \item \texttt{default}: The default by the interface (Default if undefined)
nicholas@2235 440 \item \texttt{none}: Show no labels
nicholas@2235 441 \item \texttt{number}: Show natural numbers starting at index 1
b@2365 442 \item \texttt{letter}: Show letters starting at `a'
b@2365 443 \item \texttt{capital}: Show letters starting at `A'
nicholas@2235 444 \end{itemize}
nicholas@2235 445 \item \texttt{poolSize}: non-negative integer, optional. Determine the number of \texttt{<audioelement>} nodes to take from those defined. For instance if \texttt{poolSize=3} and there are 4 audio elements, only 3 will actually be loaded and presented to the user.
nicholas@2235 446 \item \texttt{alwaysInclude}: boolean, optional. If the parent \texttt{<setup>} node has poolSize set, you can enforce the page to always be selected by setting alwaysInclude to true. Default is false
nicholas@2235 447 \item \texttt{preSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to all elements in the page at the beginning of the buffer in seconds. Supersedes any value set in \texttt{<setup>}. Default is 0.0s
nicholas@2235 448 \item \texttt{postSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to all elements in the page at the end of the buffer in seconds. Supersedes any value set in \texttt{<setup>}. Default is 0.0s
nicholas@2235 449
nickjillings@1363 450 \end{itemize}
nickjillings@1363 451
nickjillings@1363 452 The \texttt{<page>} node takes the following child nodes, note these must appear in this order:
nickjillings@1363 453 \begin{itemize}
nickjillings@1363 454 \item \texttt{<title>}: Appear once or not at all. The text content of this node specifies the title of the test page, for instance \texttt{<title>John Doe's Test</title>}
nickjillings@1363 455 \item \texttt{<commentboxprefix>}: Appear once or not at all. The text content specifies the prefix of the comment boxes, see Section~\ref{sec:commentboxes}.
nickjillings@1363 456 \item \texttt{<interface>}: Must appear only once.
b@2209 457 \item \texttt{<audioelement>}: Minimum of one. Specifies an audio element, see Section~\ref{sec:audioelement}.
b@2209 458 \item \texttt{<commentquestion>}: Min of 0, max unlimited occurrences. See Section~\ref{sec:commentboxes}.
b@2209 459 \item \texttt{<survey>}: Min of 0, max of 2 occurrences. See Section~\ref{sec:survey}.
nickjillings@1363 460 \end{itemize}
nickjillings@1363 461
nickjillings@1363 462 \subsection{Survey}
nickjillings@1363 463 \label{sec:survey}
nickjillings@1363 464 These specify any survey items to be presented. There must be a maximum of two of these per \texttt{<setup>} and \texttt{<page>} nodes. These have one attribute, location, which must be set to one of the following: before, pre, after or post. In this case before == pre and after == post. This specifies whether the survey appears before or after the node it is associated with. When a child of \texttt{<setup>} then pre/before will be shown before the first test page and after/post shown after completing the last test page. When a child of \texttt{<page>} then pre/before is before the test commences and after/post is once the test has been submitted.
nickjillings@1363 465
nickjillings@1363 466 The survey node takes as its only children the \texttt{<surveyentry>} node, of which there can be any number.
nickjillings@1363 467
nickjillings@1363 468 \subsubsection{Survey Entry}
nickjillings@1363 469 These nodes have the following attributes, which vary depending on the survey type wanted:
nickjillings@1363 470 \begin{itemize}
nickjillings@1363 471 \item \texttt{id}: ID, mandatory. Must be unique across the entire XML, used to identify the response in the results.
nickjillings@1363 472 \item \texttt{type}: String, mandatory. Must be one of the following: statement, question, checkbox, radio or number. This defines the type to show.
nickjillings@1363 473 \item \texttt{mandatory}: Boolean, optional. Defines if the survey must have a response or not. Does not apply to statements. Default is false.
nickjillings@1363 474 \item \texttt{min}: Number, optional. Only applies when \texttt{type="number"}, the minimum valid response.
nickjillings@1363 475 \item \texttt{max}: Number, optional. Only applies when \texttt{type="number"}, the maximum valid response.
nickjillings@1363 476 \item \texttt{boxsize}: String, optional. Only applies when \texttt{type="question"} and must be one of the following: normal (default), small, large or huge.
nickjillings@1363 477 \end{itemize}
nickjillings@1363 478
nickjillings@1363 479 The nodes have the following children, which vary depending on the survey type wanted.
nickjillings@1363 480 \begin{itemize}
nickjillings@1363 481 \item \texttt{<statement>}: Must appear only once. Its text content specifies the text to appear as the statement or question for the user to respond to.
nickjillings@1363 482 \item \texttt{<option>}: Only valid if the parent node has the attribute \texttt{type} set to checkbox or radio. Has attribute \texttt{name} to identify the selected option in the results. The text content is the text to show next to the radio/checkbox.
nickjillings@1363 483 \end{itemize}
nickjillings@1363 484
nickjillings@1363 485 \subsection{Interface}
nickjillings@1363 486 This node specifies any interface specific options and test parameters. It has an optional \texttt{name} attribute used to set the axis name (where applicable), such as the multi-axis APE interface. Specifying multiple interface nodes in a \texttt{<page>} node will trigger multiple axes where applicable, otherwise only the \emph{first node} will be used and the rest ignored.
nickjillings@1363 487
nickjillings@1363 488 The node has the following children, note the order these must appear in is as follows:
nickjillings@1363 489 \begin{itemize}
nickjillings@1363 490 \item \texttt{title}: Min 0, max 1 occurrence. The text content specifies the name of the axis as shown to the user.
b@2209 491 \item \texttt{interfaceoption}: Min 0, max unbounded. Specifies the interface options. See Section~\ref{sec:interfaceoption}.
b@2427 492 \item \texttt{scales}: Min 0, max 1 occurrence. Contains \texttt{<scalelabel>} nodes which define the displayed scales. See Section~\ref{sec:multiscale}.
nickjillings@1363 493 \end{itemize}
nickjillings@1363 494
nickjillings@1363 495 \subsection{Audio Element}
b@2209 496 \label{sec:audioelement}
b@2209 497 Appear as children of the \texttt{page} node. Each of these specify an individual interface fragment to display. Multiple fragments can reference the same file (allowing for repetition with different parameters or blind-doubles). The node has the following attributes:
b@2209 498 \begin{itemize}
b@2209 499 \item \texttt{id}: ID, mandatory. Must be unique across the test page. Used to identify the specific fragment in the results.
nicholas@2235 500 \item \texttt{name}: String, optional. If you wish to group fragments across pages when performing result analysis, set the group name here.
b@2209 501 \item \texttt{url}: URL, mandatory. Used with the parent \texttt{page} nodes' \texttt{hostURL} attribute to get the full url of the audio file to load.
b@2209 502 \item \texttt{gain}: Float, optional. Specify the gain in decibels to apply to the node after loudness normalisation. Default is 0.
b@2209 503 \item \texttt{type}: String, optional. Must be one of the following: normal (default when not specified), anchor, reference or outside-reference. Normal, anchor and reference are presented as normal, outside-reference presents the node as a separate interface option.
b@2209 504 \item \texttt{marker}: Integer between 0 and 100, optional. Only used when \texttt{type="anchor"|"reference"}. See Section~\ref{sec:referencesandanchors}.
nicholas@2235 505 \item \texttt{loudness}: Set the loudness of this element in LUFS. Supersedes all other set values. See Section~\ref{sec:loudness}.
nicholas@2235 506 \item \texttt{alwaysInclude}: boolean, optional. If the parent \texttt{<page>} node has poolSize set, you can enforce the element to always be selected by setting alwaysInclude to true. Default is false
nicholas@2235 507 \item \texttt{preSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to this element at the beginning of the buffer in seconds. Supersedes any other value. Default is 0.0s
nicholas@2235 508 \item \texttt{postSilence}: decimal greater than or equal to 0.0, optional. Add a portion of silence to this element at the end of the buffer in seconds. Supersedes any other value. Default is 0.0s
b@2209 509 \end{itemize}
nickjillings@1363 510
b@2209 511 \clearpage
nickjillings@1363 512
b@1435 513 \section{Features}
b@1435 514
b@1447 515 This section covers the different features implemented in the Web Audio Evaluation Tool, how to use them, and what to know about them.
b@1435 516
b@1435 517 Unless otherwise specified, \emph{each} feature described here is optional, i.e. it can be enabled or disabled and adjusted to some extent.
b@1435 518
nickjillings@1363 519 As the example project showcases (nearly) all of these features, please refer to its configuration XML document for a demonstration of how to enable and adjust them.
b@1435 520
nickjillings@1363 521 \subsection{Interface options}
b@2209 522 \label{sec:interfaceoption}
nickjillings@1363 523 The interface node has children of interface options which are used to specify modifications to the test environment. These are divided into two categories: check and show. Check are used to specify conditions which must be met before a page can be completed, these include checking all fragments have been played or checking all fragments have a comment and so on. Show is used to show an optional on-page element or control, such as the playhead or master volume.
nickjillings@1363 524
b@2103 525 Check items have the attribute ``type'' set to ``check''. The following list gives the string to give the ``name'' attribute along with a description of the check.
nickjillings@1363 526 \begin{itemize}
b@2209 527 \item \texttt{fragmentPlayed}: Checks that all fragments have been at least partially played
b@2209 528 \item \texttt{fragmentFullPlayback}: Checks that all fragments have been fully played. \emph{NOTE:} This will always clear if the page is looping as it is not possible to know every sample has been played.
b@2209 529 \item \texttt{fragmentMoved}: Checks that all fragments have been moved. This is interface dependent, for instance on AB this will always clear as there is no movement.
b@2209 530 \item \texttt{fragmentComments}: Checks that all fragments have a comment. Will clear if there are no on page comments but with a console warning.
b@2209 531 \item \texttt{scalerange}: Has two extra attributes ``min'' and ``max''. Checks that at least one element is below the min value and one element is above the max value.
nickjillings@1363 532 \end{itemize}
b@2103 533 % QUANTISATION OF THE SCALE: to be implemented?
b@1447 534
b@2103 535 Show items have the attribute ``type'' set to ``show''. The following list gives the string to give the ``name'' attribute along with a description.
nickjillings@1363 536 \begin{itemize}
nickjillings@1363 537 \item \texttt{playhead}: Shows the playhead to the end user indicating where in the file they are currently listening
nickjillings@1363 538 \item \texttt{page-count}: Shows the current test page number and the total number of test pages.
nickjillings@1363 539 \item \texttt{volume}: Shows a master volume control to the user to manipulate the output gain of the page. This is tracked.
nickjillings@1363 540 \end{itemize}
b@1447 541
b@1447 542 \subsubsection{Multiple scales}
b@2427 543 \label{sec:multiscale}
b@1447 544 In the case of multiple rating scales, e.g. when the stimuli are to be rated in terms of attributes `timbre' and `spatial impression', multiple interface nodes will have to be added, each specifying the title and annotations.
b@1447 545
b@1447 546 This is where the \texttt{interface}'s \texttt{name} attribute is particularly important: use this to retrieve the rating values, comments and metrics associated with the specified interface.
b@1447 547 If none is given, you can still use the automatically given \texttt{interface-id}, which is the interface number starting with 0 and corresponding to the order in which the rating scales appear.
b@1447 548
b@1435 549 \subsection{Randomisation}
nickjillings@1363 550 \label{sec:randomisation}
b@1447 551 [WORK IN PROGRESS]
b@1435 552
b@1435 553 \subsubsection{Randomisation of configuration XML files}
nickjillings@1363 554 The python server has a special function to automatically cycle through a list of test pages. Instead of directly requesting an XML, simply setting the url item in the browser URL to \texttt{pseudo.xml} will cycle through a list of XMLs. These XMLs must be in the local directory called \texttt{./pseudo/}.
b@1435 555 % how to
b@1435 556 % explain how this is implemented in the pythonServer
nickjillings@1446 557 %Nick? already implemented in the PHP?
nickjillings@1446 558 % Needs to be implemented in PHP and automated better, will complete soon
b@1435 559
b@1435 560
b@2429 561 \subsubsection{Randomisation of page order}
b@2209 562 The page order randomisation is set by the \texttt{<setup>} node attribute \texttt{randomiseOrder}, for example \texttt{<setup ... randomiseOrder=\texttt{\char`\"}true\texttt{\char`\"}>...</setup>} will randomise the test page order. When not set, the default is to \textbf{not} randomise the test page order.
b@1435 563
b@1435 564 \subsubsection{Randomisation of axis order}
b@1435 565
b@1435 566 \subsubsection{Randomisation of fragment order}
b@2209 567 The audio fragment randomisation is set by the \texttt{<page>} node attribute \texttt{randomiseOrder}, for example \texttt{<page ... randomiseOrder=\texttt{\char`\"}true\texttt{\char`\"}>...</page>} will randomise the fragment order on that page. When not set, the default is to \textbf{not} randomise the fragment order.
b@1435 568
b@1435 569 \subsubsection{Randomisation of initial slider position}
nickjillings@1446 570 By default slider values are randomised on start. The MUSHRA interface supports setting the initial values of all sliders through the \texttt{<page>} attribute \texttt{initial-position}. This takes an integer between 0 and 100 to signify the slider position.
b@1435 571 % /subsubsection{Randomisation of survey question order}
b@1435 572 % should be an attribute of the individual 'pretest' and 'posttest' elements
b@1435 573 % uncomment once we have it
b@1435 574
b@1435 575 \subsection{Looping}
nickjillings@1363 576 \label{sec:looping}
nickjillings@1363 577 Looping enables the fragments to loop until stopped by the user. Looping is synchronous so all fragments start at the same time on each loop.
b@2103 578 Individual test pages can have their playback looped by the \texttt{<page>} attribute \texttt{loop} with a value of ``true'' or ``false''.
b@1435 579 If the fragments are not of equal length initially, they are padded with zeros so that they are equal length, to enable looping without the fragments going out of sync relative to each other.
b@1435 580
nickjillings@1363 581 Note that fragments cannot be played until all page fragments are loaded when in looped mode, as the engine needs to know the length of each fragment to calculate the padding.
b@1435 582
b@1435 583 \subsection{Sample rate}
nickjillings@1363 584 \label{sec:samplerate}
b@2429 585 If you require the test to be conducted at a certain sample rate (i.e. you do not tolerate resampling of the elements to correspond with the system's sample rate), add \texttt{sampleRate="96000"} - where ``96000'' can be any supported sample rate (in Hz) - so that a warning message is shown alerting the subject that their system's sample rate is different from this enforced sample rate. This is checked immediately after parsing and stops the page loading any other elements if this check has failed.
b@1435 586
b@1435 587 \subsection{Metrics}
b@2429 588 The \texttt{metric} node, which contains the metrics to be tracked during the complete test, is a child of the \texttt{setup} node, and it could look as follows.
b@1435 589
b@1435 590 \begin{lstlisting}
b@2429 591 <metric>
b@1435 592 <metricEnable>testTimer</metricEnable>
b@1435 593 <metricEnable>elementTimer</metricEnable>
b@1435 594 <metricEnable>elementInitialPosition</metricEnable>
b@1435 595 <metricEnable>elementTracker</metricEnable>
b@1435 596 <metricEnable>elementFlagListenedTo</metricEnable>
b@1435 597 <metricEnable>elementFlagMoved</metricEnable>
b@1435 598 <metricEnable>elementListenTracker</metricEnable>
b@2429 599 </metric>
b@1435 600 \end{lstlisting}
b@1435 601
nickjillings@1365 602 When in doubt, err on the inclusive side, as one never knows which information is needed in the future. Most of these metrics are necessary for post-processing scripts such as timeline\_view\_movement.py. % Brecht: should perhaps list somewhere what metrics are required for which analysis scripts.
b@1435 603
b@1435 604 \subsubsection{Time test duration}
b@1435 605 \texttt{testTimer}\\
b@2429 606 One per test page. Presents the total test time from the first playback on the test page to the submission of the test page (excluding test time of the pre-/post- test surveys). This is presented in the results as \texttt{<metricresult id=\texttt{\char`\"}testTime\texttt{\char`\"}> 8.60299319727892 </metricresult>}. The time is in seconds.
b@1435 607
b@1435 608 \subsubsection{Time fragment playback}
b@1435 609 \texttt{elementTimer}\\
b@2209 610 One per audio fragment per test page. This totals up the entire time the audio fragment has been listened to in this test and is presented as \texttt{<metricresult name=\texttt{\char`\"}enableElementTimer\texttt{\char`\"}> 1.0042630385487428 </metricresult>}. The time is in seconds.
b@1435 611
b@1435 612 \subsubsection{Initial positions}
b@1435 613 \texttt{elementInitialPosition}\\
b@2209 614 One per audio fragment per test page. Tracks the initial position of the sliders, especially relevant when these are randomised. Example result \texttt{<metricresult name=\texttt{\char`\"}elementInitialPosition\texttt{\char`\"}> 0.8395522388059702 </metricresult>}.
b@1435 615
b@1435 616 \subsubsection{Track movements}
nickjillings@1446 617 \texttt{elementTracker}\\
nickjillings@1446 618 One per audio fragment per test page. Tracks the movement of each interface object. Each movement event has the time it occurred at and the new value.
nickjillings@1446 619 \subsubsection{Which fragments listened to}
nickjillings@1446 620 \texttt{elementFlagListenedTo}\\
nickjillings@1446 621 One per audio fragment per test page. Boolean response, set to true if listened to.
nickjillings@1446 622 \subsubsection{Which fragments moved}
nickjillings@1446 623 \texttt{elementFlagMoved}\\
nickjillings@1446 624 One per audio fragment per test page. Binary check whether or not the marker corresponding with a particular fragment was moved at all throughout the experiment.
b@1435 625
nickjillings@1446 626 \subsubsection{elementListenTracker}
nickjillings@1446 627 \texttt{elementListenTracker}\\
nickjillings@1446 628 One per audio fragment per test page. Tracks the playback events of each audio element pairing both the time in the test when playback started and when it stopped, it also gives the buffertime positions.
b@1435 629
b@1435 630 \subsection{References and anchors}
nickjillings@1363 631 \label{sec:referencesandanchors}
nickjillings@1446 632 The audio elements, \texttt{<audioelement>} have the attribute \texttt{type}, which defaults to normal. Setting this to one of the following will have the following effects.
nickjillings@1446 633 \subsubsection{Outside Reference}
b@2103 634 Set type to `outside-reference'. This will place the object in a separate playback element clearly labelled as an outside reference. This is exempt of any movement checks but will still be included in any listening checks.
b@1435 635 \subsubsection{Hidden reference}
b@2427 636 Set type to `reference'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the `reference' type in the results to quickly find it. The reference can be forced to be above a value before completing the test page by setting the attribute `marker' to be a value between 0 and 100 representing the integer value position it must be equal to or above.
b@1435 637 \subsubsection{Hidden anchor}
b@2103 638 Set type to `anchor'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the `anchor' type in the results to quickly find it. The anchor can be forced to be below a value before completing the test page by setting the attribute `marker' to be a value between 0 and 100 representing the integer value position it must be equal to or below.
b@1435 639
b@1435 640 \subsection{Checks}
b@1435 641 \label{sec:checks}
b@1435 642
b@1435 643 %blabla
b@1435 644 These checks are enabled in the \texttt{interface} node, which is a child of the \texttt{setup} node.
b@1435 645 \subsubsection{Playback checks}
b@1435 646 % what it does/is
b@1435 647 Enforce playing each sample at least once, for at least a little bit (e.g. this test is satisfied even if you only play a tiny portion of the file), by alerting the user to which samples have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before clicking each sample at least once.
b@1435 648 % how to enable/disable
b@1435 649
b@1435 650 Alternatively, one can check whether the \emph{entire} fragment was listened to at least once.
b@1435 651 % how to enable
b@1435 652
b@1435 653 Add \texttt{<check name="fragmentPlayed"/>} to the \texttt{interface} node.
b@1435 654
b@1435 655
b@1435 656 \subsubsection{Movement check}
b@1435 657 Enforce moving each sample at least once, for at least a little bit (e.g. this test is satisfied even if you only play a tiny portion of the file), by alerting the user to which samples have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before clicking each sample at least once.
b@1435 658 If there are several axes, the warning will specify which samples have to be moved on which axis.
b@1435 659
b@1435 660 Add \texttt{<check name="fragmentMoved"/>} to the \texttt{interface} node.
b@1435 661
b@1435 662 \subsubsection{Comment check}
b@1435 663 % How to enable/disable?
b@1435 664
b@1435 665 Enforce commenting, by alerting the user to which samples have not been commented on upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before putting at least one character in each comment box.
b@1435 666
b@1435 667 Note that this does not apply to any extra (text, radio button, checkbox) elements, unless these have the `mandatory' option enabled. %Nick? is this extra 'mandatory' option implemented?
b@1435 668
b@1435 669 Add \texttt{<check name="fragmentComments"/>} to the \texttt{interface} node.
b@1435 670
b@1435 671 %ADD: how to add a custom comment box
b@1435 672
b@1435 673 \subsubsection{Scale use check}
b@1435 674 It is possible to enforce a certain usage of the scale, meaning that at least one slider needs to be below and/or above a certain percentage of the slider.
b@1435 675
b@1435 676 Add \texttt{<check name="scalerange" min="25" max="75"/>} to the \texttt{interface} node.
b@1435 677
b@1435 678 \subsubsection{Note on the use of multiple rating axes}
b@1435 679 I.e. what if more than one axis? How to specify which axis the checks relate to? %Nick? to add?
b@1435 680
b@1435 681 \subsection{Platform information}
b@1435 682 % what does it do, what does it look like
b@1435 683 % limitations?
b@1447 684 For troubleshooting and usage statistics purposes, information about the browser and the operating system is logged in the results XML file. This is especially useful in the case of remote tests, when it is not certain which operating system, browser and/or browser version were used. Note that this information is not always available and/or accurate, e.g. when the subject has taken steps to be more anonymous, so it should be treated as a guide only.
b@1447 685
b@1447 686 Example:
b@1447 687 \begin{lstlisting}
b@1447 688 <navigator>
b@1447 689 <platform>MacIntel</platform>
b@1447 690 <vendor>Google Inc.</vendor>
nickjillings@1363 691 <uagent>Mozilla/5.0 ... </uagent>
nickjillings@1365 692 <screen innerHeight="1900px" innerWidth="1920px"/>
b@1447 693 </navigator>
b@1447 694 \end{lstlisting}
b@1435 695
b@1435 696 \subsection{Gain}
b@1435 697 It is possible to set the gain (in decibel) applied to the different audioelements, as an attribute of the \texttt{audioelement} nodes in the configuration XML file:
b@1435 698
nickjillings@1446 699 \texttt{<audioelement url="sample-01.wav" gain="-6" id="sample01quieter" />}\\
nickjillings@1365 700 Please note, there are no checks to detect whether the value was accidentally entered as a linear gain. This gain is applied \emph{after} any loudness normalisation.
b@1435 701
b@1435 702 \subsection{Loudness}
nickjillings@1363 703 \label{sec:loudness}
b@1435 704 % automatic loudness equalisation
b@1435 705 % guide to loudness.js
b@2429 706 Each audio fragment has its loudness calculated on loading. The tool uses the EBU R 128 recommendation following the ITU-R BS.1770-4 loudness calculations to return the integrated LUFS loudness. The attribute \texttt{loudness} will set the loudness from the scope it is applied in. Applying it in the \texttt{<setup>} node will set the loudness for all test pages. Applying it in the \texttt{<page>} node will set the loudness for that page. Applying it in the \texttt{<audioelement>} node will set the loudness for that fragment. The scope is set locally, so if there is a loudness on both the \texttt{<page>} and \texttt{<setup>} nodes, that test page will take the value associated with the \texttt{<page>}. The loudness attribute is set in LUFS.
nickjillings@1365 707
nickjillings@1365 708 \subsection{Comment Boxes}
nickjillings@1365 709 \label{sec:commentboxes}
b@2429 710 There are two types of comment boxes which can be presented, those linked to the audio fragments on the page and those which pose a general question. When enabled, there is a comment box below the main interface for every fragment on the page. There is some customisation around the text that accompanies the box, by default the text will read ``Comment on fragment'' followed by the fragment identifier (the number / letter shown by the interface). This `prefix' can be modified using the page node \texttt{<commentboxprefix>}, see Section~\ref{sec:page} for where to place this node in the document. The comment box prefix node takes no attribute and the text contained by the node represents the prefix. For instance if we have a node \texttt{<commentboxprefix> Describe fragment </commentboxprefix>}, then the interface will show ``Describe fragment'' followed by the identifier.
nickjillings@1365 711
nickjillings@1365 712 The second type of comment box is slightly more complex because it can handle different types of response data. These are called comment questions because they are located in the comment section of the test but pose a specific question.
b@1435 713
b@1435 714 \clearpage
b@1435 715
b@1402 716
b@1402 717 \section{Using the test create tool}
b@1402 718 We provide a test creation tool, available in the directory test\_create. This tool is a self-contained web page, so double clicking it will launch the page in your system default browser.
b@1402 719
b@1402 720 The test creation tool can help you build a simple test very quickly. By simply selecting your interface and clicking check-boxes you can build a test in minutes.
b@1402 721
b@1402 722 Include audio by dragging and dropping the stimuli you wish to include.
b@1402 723
b@1402 724 The tool examines your XML before exporting to ensure you do not export an invalid XML structure which would crash the test.
b@1402 725
b@1402 726 This guide will help you to construct your own interface on top of the WAET (Web Audio Evaluation Tool) engine. The WAET engine resides in the core.js file, this contains prototype objects to handle most of the test creation, operation and data collection. The interface simply has to link into this at the correct points.
nickjillings@1446 727
b@1402 728
b@1435 729 \clearpage
b@1435 730 \section{Analysis and diagnostics}
b@1435 731 \subsection{In the browser}
b@1435 732 See `analysis.html' in the main folder: immediate visualisation of (by default) all results in the `saves/' folder.
b@1435 733
b@1435 734 \subsection{Python scripts}
b@2264 735 The package includes Python (2.7) scripts (in `python/') to extract ratings and comments, generate visualisations of ratings and timelines, and produce a fully fledged report.
b@1435 736
b@1435 737 Visualisation requires the free matplotlib toolbox (http://matplotlib.org), numpy and scipy.
b@2264 738 By default, the scripts can be run from the `python' folder, with the result files in the `saves' folder (the default location where result XMLs are stored). Each script takes the XML file folder as an argument, along with other arguments in some cases.
b@1435 739 Note: to avoid all kinds of problems, please avoid using spaces in file and folder names (this may work on some systems, but others don't like it).
b@1435 740
b@1435 741 \subsubsection{comment\_parser.py}
b@1435 742 Extracts comments from the output XML files corresponding with the different subjects found in `saves/'. It creates a folder per `audioholder'/page it finds, and stores a CSV file with comments for every `audioelement'/fragment within these respective `audioholders'/pages. In this CSV file, every line corresponds with a subject/output XML file. Depending on the settings, the first column containing the name of the corresponding XML file can be omitted (for anonymisation).
b@1435 743 Beware of Excel: sometimes the UTF-8 is not properly imported, leading to problems with special characters in the comments (particularly cumbersome for foreign languages).
b@1435 744
b@1435 745 \subsubsection{evaluation\_stats.py}
b@1435 746 Shows a few statistics of tests in the `saves/' folder so far, mainly for checking for errors. Shows the number of files that are there, the audioholder IDs that were tested (and how many of each separate ID), the duration of each page, the duration of each complete test, the average duration per page, and the average duration in function of the page number.
b@1435 747
b@1435 748 \subsubsection{generate\_report.py}
b@1435 749 Similar to `evaluation\_stats.py', but generates a PDF report based on the output files in the `saves/' folder - or any folder specified as command line argument. Uses pdflatex to write a LaTeX document, then convert to a PDF.
b@1435 750
b@1435 751 \subsubsection{score\_parser.py}
b@1435 752 Extracts rating values from the XML to CSV - necessary for running visualisation of ratings. Creates the folder `saves/ratings/' if not yet created, to which it writes a separate file for every `audioholder'/page in any of the output XMLs it finds in `saves/'. Within each file, rows represent different subjects (output XML file names) and columns represent different `audioelements'/fragments.
b@1435 753
b@1435 754 \subsubsection{score\_plot.py}
b@1435 755 Plots the ratings as stored in the CSVs created by score\_parser.py.
b@1435 756 Depending on the settings, it displays and/or saves (in `saves/ratings/') a boxplot, confidence interval plot, scatter plot, or a combination of the aforementioned.
b@1435 757 Requires the free matplotlib library.
b@1435 758 At this point, more than one subject is needed for this script to work.
b@1435 759
b@1435 760 \subsubsection{timeline\_view\_movement.py}
b@1435 761 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows the marker movements of the different fragments, along with when each fragment was played (red regions). Automatically takes fragment names, rating axis title, rating axis labels, and audioholder name from the XML file (if available).
b@1435 762
b@1435 763 \subsubsection{timeline\_view.py} % should be omitted or absorbed by the above soon
b@1435 764 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows when and for how long the subject listened to each of the fragments.
b@1435 765
b@1402 766
b@1402 767
b@1402 768 \clearpage
b@1402 769 \section{Troubleshooting} \label{sec:troubleshooting}
b@1435 770 \subsection{Reporting bugs and requesting features}
b@1435 771 Thanks to feedback from using the interface in experiments by the authors and others, many bugs have been caught and fatal crashes due to the interface seem to be a thing of the past entirely.
b@1402 772
b@1435 773 We continually develop this tool to fix issues and implement features useful to us or our user base. See \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/issues} for a list of feature requests and bug reports, and their status.
b@1402 774
b@1435 775 Please contact the authors if you experience any bugs, if you would like additional functionality, if you spot any errors or gaps in the documentation, if you have questions about using the interface, or if you would like to give any feedback (even positive!) about the interface. We look forward to learning how the tool has (not) been useful to you.
b@1402 776
b@1402 777
b@1435 778 \subsection{First aid}
b@1435 779 Meanwhile, if things do go wrong or the test needs to be interrupted for whatever reason, all data is not lost. In a normal scenario, the test needs to be completed until the end (the final `Submit'), at which point the output XML is stored in the \texttt{saves/} folder. If this stage is not reached, open the JavaScript Console (see below for how to find it) and type
b@1402 780
b@1435 781 \texttt{createProjectSave()}
b@1402 782
b@1435 783 to present the result XML file on the client side, or
b@1402 784
b@1435 785 \texttt{createProjectSave(specification.projectReturn)}
b@1402 786
b@1435 787 to try to store it to the specified location, e.g. the `saves/' folder on the web server or the local machine (on failure the result XML should be presented directly in the web browser instead)
b@1402 788
b@1435 789 and hit enter. This will open a pop-up window with a hyperlink that reads `Save File'; click it and an XML file with results until that point should be stored in your download folder.
b@1435 790
b@1435 791 Alternatively, a lot of data can be read from the same console, in which the tool prints a lot of debug information. Specifically:
b@1435 792 \begin{itemize}
b@1435 793 \item the randomisation of pages and fragments are logged;
b@1435 794 \item any time a slider is played, its ID and the time stamp (in seconds since the start of the test) are displayed;
b@1435 795 \item any time a slider is dragged and dropped, the location where it is dropped including the time stamp are shown;
b@1435 796 \item any comments and pre- or post-test questions and their answers are logged as well.
b@1435 797 \end{itemize}
b@1402 798
b@1435 799 You can select all this and save into a text file, so that none of this data is lost. You may choose to do this even when a test was successful as an extra precaution.
b@1402 800
b@1435 801 If you encounter any issue which you believe to be caused by any aspect of the tool, and/or which the documentation does not mention, please do let us know!
b@1402 802
b@1435 803 \subsubsection*{Opening the JavaScript Console}
b@1435 804 \begin{itemize}
b@1435 805 \item In Google Chrome, the JavaScript Console can be found in \textbf{View$>$Developer$>$JavaScript Console}, or via the keyboard shortcut Cmd + Alt + J (Mac OS X).
b@1435 806 \item In Safari, the JavaScript Console can be found in \textbf{Develop$>$Show Error Console}, or via the keyboard shortcut Cmd + Alt + C (Mac OS X). Note that for the Developer menu to be visible, you have to go to Preferences (Cmd + ,) and enable `Show Develop menu in menu bar' in the `Advanced' tab. \textbf{Note that as long as the Developer menu is not visible, nothing is logged to the console, i.e. you will only be able to see diagnostic information from when you switched on the Developer tools onwards.}
b@1435 807 \item In Firefox, go to \textbf{Tools$>$Web Developer$>$Web Console}, or hit Cmd + Alt + K.
b@1435 808 \end{itemize}
b@1402 809
b@1435 810 \subsection{Known issues and limitations}
b@1435 811 \label{sec:knownissues}
b@1435 812
b@1435 813 The following is a non-exhaustive list of problems and limitations you may experience using this tool, due to not being supported yet by us, or by the Web Audio API and/or (some) browsers.
b@1435 814
b@1435 815 \begin{itemize}
b@1435 816 \item Issue \href{https://code.soundsoftware.ac.uk/issues/1463}{\textbf{\#1463}}: \textbf{Firefox} only supports 8 bit and 16 bit WAV files. Pending automatic requantisation (which deteriorates the audio signal's dynamic range to some extent), WAV format stimuli need to adhere to these limitations in order for the test to be compatible with Firefox.
b@1435 817 \item Issues \href{https://code.soundsoftware.ac.uk/issues/1474}{\textbf{\#1474}} and \href{https://code.soundsoftware.ac.uk/issues/1462}{\textbf{\#1462}}: On occasions, audio is not working - or only a continuous `beep' can be heard - notably in \textbf{Safari}. Refreshing, quitting the browser and even enabling Developer tools in Safari's Preferences pane (`Advanced' tab: ``Show `Develop' menu in menu bar'') has helped resolve this. If no (high quality) audio can be heard, make sure your entire playback system's settings are all correct.
b@1435 818 \end{itemize}
b@1402 819
b@1402 820 \clearpage
b@1402 821 \bibliographystyle{ieeetr}
b@1402 822 \bibliography{Instructions}{}
b@1402 823
b@1402 824
b@1402 825 \clearpage
b@1402 826 \appendix
b@1402 827
b@1435 828 \section{Legacy}
b@1435 829 The APE interface and most of the functionality of the first WAET editions are inspired by the APE toolbox for MATLAB \cite{ape}. See \url{https://code.soundsoftware.ac.uk/projects/ape} for the source code and \url{http://brechtdeman.com/publications/aes136.pdf} for the corresponding paper.
b@1435 830
b@1435 831 \clearpage
b@1435 832
b@1402 833 \section{Listening test instructions example}
b@1402 834
b@1402 835 Before each test, show the instructions below or similar and make sure it is available to the subject throughout the test. Make sure to ask whether the participant has any questions upon seeing and/or reading the instructions.
b@1402 836
b@1402 837 \begin{itemize}
b@1402 838 \item You will be asked for your name (``John Smith'') and location (room identifier).
b@1402 839 \item An interface will appear, where you are asked to
b@1402 840 \begin{itemize}
b@1402 841 \item click green markers to play the different mixes;
b@1402 842 \item drag the markers on a scale to reflect your preference for the mixes;
b@1402 843 \item comment on these mixes, using text boxes with corresponding numbers (in your \textbf{native language});
b@1402 844 \item optionally comment on all mixes together, or on the song, in `General comments'.
b@1402 845 \end{itemize}
b@1402 846 \item You are asked for your personal, honest opinion. Feel free to use the full range of the scale to convey your opinion of the various mixes. Don't be afraid to be harsh and direct.
b@1402 847 \item The markers appear at random positions at first (which means some markers may hide behind others).
b@1402 848 \item The interface can take a few seconds to start playback, but switching between mixes should be instantaneous.
b@1402 849 \item This is a research experiment, so please forgive us if things go wrong. Let us know immediately and we will fix it or restart the test.
b@1402 850 \item When the test is finished (after all songs have been evaluated), just call the experimenter, do NOT close the window.
b@1402 851 \item After the test, please fill out our survey about your background, experience and feedback on the test.
b@1402 852 \item By participating, you consent to us using all collected data for research. Unless asked explicitly, all data will be anonymised when shared.
b@1402 853 \end{itemize}
b@1402 854
b@1402 855 \clearpage
b@1402 856
b@1390 857 \section{Terminology} % just to keep track of what exactly we call things. Don't use terms that are too different, to avoid confusion.
b@1390 858 As a guide to better understand the Instructions, and to expand them later, here is a list of terms that may be unclear or ambiguous unless properly defined.
b@1435 859 \begin{description}
b@1435 860 \item[Subject] The word we use for a participant, user, ... of the test, i.e. not the experimenter who designs the test but the person who evaluates the audio under test as part of an experiment (or the preparation of one).
b@1390 861 \item[User] The person who uses the tool to configure, run and analyse the test - i.e. the experimenter, most likely a researcher - or at least the person responsible for setting up the experiment
b@2233 862 \item[Page] A screen in a test
b@1447 863 \item[Fragment] An element, stimulus or sample in a test; corresponds with an \texttt{audioelement}
b@1435 864 \item[Test] A complete test which can consist of several pages; corresponds with an entire configuration XML file
b@1435 865 \item[Configuration XML file] The XML file containing the necessary information on interface, samples, survey questions, configurations, ... which the JavaScript modules read to produce the desired test.
b@1435 866 \item[Results XML file] The output of a successful test, including ratings, comments, survey responses, timing information, and the complete configuration XML file with which the test was generated in the first place.
b@1435 867 \end{description}
b@1435 868
b@1435 869 \clearpage
b@1435 870
b@1435 871 \setcounter{secnumdepth}{0} % don't number this last bit
b@1435 872 \section{Contact details} % maybe add web pages, Twitter accounts, whatever you like
b@1402 873 \label{sec:contact}
b@1402 874
b@1402 875 \begin{itemize}
b@1402 876 \item Nicholas Jillings: \texttt{nicholas.jillings@mail.bcu.ac.uk}
b@1402 877 \item Brecht De Man: \texttt{b.deman@qmul.ac.uk}
b@1402 878 \item David Moffat: \texttt{d.j.moffat@qmul.ac.uk}
b@1402 879 \end{itemize}
b@1402 880
b@1402 881 \end{document}