b@378 1 \documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
b@378 2 \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
b@378 3 \geometry{letterpaper} % ... or a4paper or a5paper or ...
b@378 4 %\geometry{landscape} % Activate for rotated page geometry
b@378 5 \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{graphicx} % Use pdf, png, jpg, or eps with pdflatex; use eps in DVI mode
b@378 7 % TeX will automatically convert eps --> pdf in pdflatex
b@378 8
b@378 9 \usepackage{listings} % Source code
b@413 10 \usepackage{xcolor} % colour (source code for instance)
b@413 11 \definecolor{grey}{rgb}{0.1,0.1,0.1}
b@413 12 \definecolor{darkblue}{rgb}{0.0,0.0,0.6}
b@413 13 \definecolor{cyan}{rgb}{0.0,0.6,0.6}
b@413 14
b@378 15 \usepackage{amssymb}
b@378 16 \usepackage{cite}
b@378 17 \usepackage{hyperref} % Hyperlinks
b@378 18 \usepackage[nottoc,numbib]{tocbibind} % 'References' in TOC
b@378 19
b@378 20 \graphicspath{{img/}} % Relative path where the images are stored.
b@378 21
b@378 22 \title{Instructions for \\ Web Audio Evaluation Tool}
b@378 23 \author{Nicholas Jillings, Brecht De Man and David Moffat}
b@378 24 \date{7 December 2015} % Activate to display a given date or no date
b@378 25
b@378 26 \begin{document}
b@378 27 \maketitle
b@378 28
These instructions cover the use of the Web Audio Evaluation Tool on Windows and Mac OS X platforms.

We request that you acknowledge the authors and cite our work when using it \cite{waet}; see also CITING.txt.

The tool is available in its entirety, including source code, at \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/}, under the GNU General Public License v3.0 (\url{http://choosealicense.com/licenses/gpl-3.0/}); see also LICENSE.txt.
b@413 34
b@429 35 % TO DO: Linux (Android, iOS)
b@378 36
b@378 37 \tableofcontents
b@378 38
b@378 39 \clearpage
b@378 40
b@378 41 \section{Installation}
b@413 42 Download the folder (\url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool/archive/tip.zip}) and unzip in a location of your choice, or pull the source code from \url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool} (Mercurial).
b@378 43
b@378 44 \subsection{Contents}
b@378 45 The folder should contain the following elements: \\
b@378 46
b@378 47 \textbf{Main folder:}
b@378 48 \begin{itemize}
b@378 49 \item \texttt{analyse.html}: analysis and diagnostics of a set of result XML files
b@378 50 \item \texttt{ape.css, core.css, graphics.css, mushra.css, structure.css}: style files (edit to change appearance)
b@378 51 \item \texttt{ape.js}: JavaScript file for APE-style interface \cite{ape}
\item \texttt{CITING.txt, LICENSE.txt, README.txt}: text files with, respectively, the citation which we ask you to include in any work where this tool or any portion thereof is used or modified; the license under which the software is shared; and a general readme file referring to these instructions.
b@378 53 \item \texttt{core.js}: JavaScript file with core functionality
b@378 54 \item \texttt{index.html}: webpage where interface should appear (includes link to test configuration XML)
b@378 55 \item \texttt{jquery-2.1.4.js}: jQuery JavaScript Library
\item \texttt{loudness.js}: allows for automatic calculation of the loudness of Web Audio API Buffer objects, returning gain values to correct for a target loudness or to match loudness between multiple objects
b@413 57 \item \texttt{mushra.js}: JavaScript file for MUSHRA-style interface \cite{mushra}
b@378 58 \item \texttt{pythonServer.py}: webserver for running tests locally
b@378 59 \item \texttt{pythonServer-legacy.py}: webserver with limited functionality (no automatic storing of output XML files)
b@378 60 \item \texttt{save.php}: PHP script to store result XML files to web server\\
b@378 61 \end{itemize}
b@378 62 \textbf{Documentation (./docs/)}
b@378 63 \begin{itemize}
b@413 64 \item \href{http://c4dm.eecs.qmul.ac.uk/dmrn/events/dmrnp10/#posters}{DMRN+10}: PDF and \LaTeX source of poster for 10\textsuperscript{th} Digital Music Research Network One-Day workshop (``soft launch'')
b@378 65 \item Instructions: PDF and \LaTeX source of these instructions
b@378 66 \item Project Specification Document (\LaTeX/PDF)
b@378 67 \item Results Specification Document (\LaTeX/PDF)
\item SMC15: PDF and \LaTeX source of 12\textsuperscript{th} Sound and Music Computing Conference paper \cite{waet}
\item WAC2016: PDF and \LaTeX source of 2\textsuperscript{nd} Web Audio Conference paper\\
b@378 70 \end{itemize}
b@378 71 \textbf{Example project (./example\_eval/)}
b@378 72 \begin{itemize}
\item An example of what the setup XML should look like, with example audio files 0.wav-10.wav: short recordings at 44.1 kHz, 16 bit, of a woman saying the corresponding number (useful for testing randomisation and general familiarisation with the interface).\\
b@378 74 \end{itemize}
b@378 75 \textbf{Output files (./saves/)}
b@378 76 \begin{itemize}
b@378 77 \item The output XML files of tests will be stored here by default by the \texttt{pythonServer.py} script.\\
b@378 78 \end{itemize}
b@378 79 \textbf{Auxiliary scripts (./scripts/)}
b@378 80 \begin{itemize}
b@378 81 \item Helpful Python scripts for extraction and visualisation of data.\\
b@378 82 \end{itemize}
b@378 83 \textbf{Test creation tool (./test\_create/)}
b@378 84 \begin{itemize}
b@378 85 \item Webpage for easily setting up your own test without having to delve into the XML.\\
b@378 86 \end{itemize}
b@378 87
b@413 88 \subsection{Compatibility}
b@378 89 As Microsoft Internet Explorer doesn't support the Web Audio API\footnote{\url{http://caniuse.com/\#feat=audio-api}}, you will need another browser like Google Chrome, Safari or Firefox (all three are tested and confirmed to work).
b@413 90
Firefox does not currently support bit depths other than 8 or 16 bit for PCM wave files. In the future, a warning message will tell the user that their content is being quantised automatically. %Nick? Right? To be removed if and when actually implemented
b@378 92
The tool is platform-independent and works in any browser that supports the Web Audio API. It does not require any specific, proprietary software. However, if the tool is hosted locally (i.e. you are not hosting it on an actual webserver), you will need Python (2.7), a free programming language - see the `Local test' subsection below.
b@378 94
b@413 95 \clearpage
b@378 96
b@378 97
b@378 98 \section{Test setup}
b@378 99
b@378 100 \subsection{Sample rate}
b@378 101 Depending on how the experiment is set up, audio is resampled automatically (the Web Audio default) or the sample rate is enforced. In the latter case, you will need to make sure that the sample rate of the system is equal to the sample rate of these audio files. For this reason, all audio files in the experiment will have to have the same sample rate.
b@378 102
b@378 103 Always make sure that all other digital equipment in the playback chain (clock, audio interface, digital-to-analog converter, ...) is set to this same sample rate.
b@378 104
b@378 105 Note that upon changing the sampling rate, the browser will have to be restarted for the change to take effect.
b@378 106
b@378 107 \subsubsection{Mac OS X}
b@378 108 To change the sample rate in Mac OS X, go to \textbf{Applications/Utilities/Audio MIDI Setup} or find this application with Spotlight (see Figure \ref{fig:audiomidisetup}). Then select the output of the audio interface you are using and change the `Format' to the appropriate number. Also make sure the bit depth and channel count are as desired.
b@378 109 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
b@378 110
b@378 111 Also make sure left and right channel gains are equal, as some applications alter this without changing it back, leading to a predominantly louder left or right channel. See Figure \ref{fig:audiomidisetup} for an example where the channel gains are different.
b@378 112
b@378 113 \begin{figure}[tb]
b@378 114 \centering
b@378 115 \includegraphics[width=.65\textwidth]{img/audiomidisetup.png}
b@378 116 \caption{The Audio MIDI Setup window in Mac OS X}
b@378 117 \label{fig:audiomidisetup}
b@378 118 \end{figure}
b@378 119
b@378 120 \subsubsection{Windows}
b@378 121 To change the sample rate in Windows, right-click on the speaker icon in the lower-right corner of your desktop and choose `Playback devices'. Right-click the appropriate playback device and click `Properties'. Click the `Advanced' tab and verify or change the sample rate under `Default Format'. % NEEDS CONFIRMATION
b@378 122 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
b@378 123
b@378 124 \subsection{Local test}
b@378 125 If the test is hosted locally, you will need to run the local webserver provided with this tool.
b@378 126
nicholas@434 127 \subsubsection{Mac OS X \& Linux}
b@378 128
nicholas@434 129 On Mac OS X, Python comes preinstalled, as with most Unix/Linux distributions.
b@378 130
b@378 131 Open the Terminal (find it in \textbf{Applications/Terminal} or via Spotlight), and go to the folder you downloaded. To do this, type \texttt{cd [folder]}, where \texttt{[folder]} is the folder where to find the \texttt{pythonServer.py} script you downloaded. For instance, if the location is \texttt{/Users/John/Documents/test/}, then type
b@378 132
b@378 133 \texttt{cd /Users/John/Documents/test/}
b@378 134
b@378 135 Then hit enter and run the Python script by typing
b@378 136
b@378 137 \texttt{python pythonServer.py}
b@378 138
b@378 139 and hit enter again. See also Figure \ref{fig:terminal}.
b@378 140
b@378 141 \begin{figure}[htbp]
b@378 142 \begin{center}
b@378 143 \includegraphics[width=.75\textwidth]{pythonServer.png}
b@378 144 \caption{Mac OS X: The Terminal window after going to the right folder (\texttt{cd [folder\_path]}) and running \texttt{pythonServer.py}.}
b@378 145 \label{fig:terminal}
b@378 146 \end{center}
b@378 147 \end{figure}
b@378 148
Alternatively, you can simply type \texttt{python} (followed by a space) and drag the file into the Terminal window from Finder. % DOESN'T WORK YET
b@378 150
You can leave this running throughout the different experiments (i.e. leave the Terminal open). Once running, the Terminal will report the URL to type into your browser to initiate the test; usually this is \texttt{http://localhost:8000/}.
b@378 152
b@378 153 To start the test, open the browser and type
b@378 154
b@378 155 \texttt{localhost:8000}
b@378 156
b@378 157 and hit enter. The test should start (see Figure \ref{fig:test}).
b@378 158
To quit the server, either close the Terminal window or press Ctrl+C on your keyboard to forcibly shut it down.
b@378 160
b@378 161 \subsubsection{Windows}
b@378 162
On Windows, Python 2.7 is not generally preinstalled and therefore has to be downloaded\footnote{\url{https://www.python.org/downloads/windows/}} and installed in order to run scripts such as the local webserver, which is necessary if the tool is hosted locally.
b@378 164
b@378 165 Simply double click the Python script \texttt{pythonServer.py} in the folder you downloaded.
b@378 166
b@378 167 You may see a warning like the one in Figure \ref{fig:warning}. Click `Allow access'.
b@378 168
b@378 169 \begin{figure}[htbp]
b@378 170 \begin{center}
b@378 171 \includegraphics[width=.6\textwidth]{warning.png}
b@378 172 \caption{Windows: Potential warning message when executing \texttt{pythonServer.py}.}
b@378 173 \label{fig:warning}
b@378 174 \end{center}
b@378 175 \end{figure}
b@378 176
The process should now start in the Command Prompt window that opens - see Figure \ref{fig:python}.
b@378 178
b@378 179 \begin{figure}[htbp]
b@378 180 \begin{center}
b@378 181 \includegraphics[width=.75\textwidth]{python.png}
b@378 182 \caption{Windows: The Command Prompt after running \texttt{pythonServer.py} and opening the corresponding website.}
b@378 183 \label{fig:python}
b@378 184 \end{center}
b@378 185 \end{figure}
b@378 186
b@378 187 You can leave this running throughout the different experiments (i.e. leave the Command Prompt open).
b@378 188
b@378 189 To start the test, open the browser and type
b@378 190
b@378 191 \texttt{localhost:8000}
b@378 192
b@378 193 and hit enter. The test should start (see Figure \ref{fig:test}).
b@378 194
b@378 195 \begin{figure}[htb]
b@378 196 \begin{center}
b@378 197 \includegraphics[width=.8\textwidth]{test.png}
b@378 198 \caption{The start of the test in Google Chrome on Windows 7.}
b@378 199 \label{fig:test}
b@378 200 \end{center}
b@378 201 \end{figure}
b@378 202
b@378 203 If at any point in the test the participant reports weird behaviour or an error of some kind, or the test needs to be interrupted, please notify the experimenter and/or refer to Section \ref{sec:troubleshooting}.
b@378 204
When the test is over (the subject should see a message to that effect, and click `Submit' one last time), the output XML file containing all collected data should have appeared in `saves/'. The names of these files are `test-0.xml', `test-1.xml', etc., in ascending order. The Terminal or Command Prompt running the local web server will display the name under which each result file is saved. If such a file did not appear, please again refer to Section \ref{sec:troubleshooting}.
b@378 206
b@378 207 It is advised that you back up these results as often as possible, as a loss of this data means that the time and effort spent by the subject(s) has been in vain. Save the results to an external or network drive, and/or send them to the experimenter regularly.
b@378 208
b@378 209 To start the test again for a new participant, you do not need to close the browser or shut down the Terminal or Command Prompt. Simply refresh the page or go to \texttt{localhost:8000} again.
b@378 210
b@378 211
b@378 212 \subsection{Remote test}
b@378 213 Put all files on a web server which supports PHP. This allows the `save.php' script to store the XML result files in the `saves/' folder. If the web server is not able to store the XML file there at the end of the test, it will present the XML file locally to the user, as a `Save file' link.
b@413 214
b@413 215 Make sure the \texttt{projectReturn} attribute of the \texttt{setup} node is set to the \texttt{save.php} script.
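For example, a minimal sketch of a \texttt{setup} node with this attribute (other attributes and child nodes are elided; the exact value may need to be the path or URL of \texttt{save.php} on your server):

\begin{lstlisting}
<setup interface="APE" projectReturn="save.php">
  <!-- surveys, metrics, interface checks, ... -->
</setup>
\end{lstlisting}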
b@413 216
b@413 217 Then, just go to the URL of the corresponding HTML file, e.g. \texttt{http://server.com/path/to/WAET/index.html?url=test/my-test.xml}. If storing on the server doesn't work at submission (e.g. if the \texttt{projectReturn} attribute isn't properly set), the result XML file will be presented to the subject on the client side, as a `Save file' link.
b@413 218
nicholas@434 219 \subsection{Multiple test documents}
By default the index page will load a demo page of tests. To automatically load a test document, you need to append its location to the URL. If your URL is normally \texttt{http://localhost:8000/index.html}, you would append the following: \texttt{?url=/path/to/your/test.xml}. Replace the fields with your actual path; the path is relative to the running directory, so if your test \texttt{project.xml} is in the directory \texttt{example\_eval}, you would append \texttt{?url=/example\_eval/project.xml}.
b@413 221
b@378 222 \clearpage
b@413 223
b@413 224 \section{Interfaces}
b@413 225
b@413 226 The Web Audio Evaluation Tool comes with a number of interface styles, each of which can be customised extensively, either by configuring them differently using the many optional features, or by modifying the JavaScript files.
b@413 227
b@413 228 To set the interface style for the whole test, %Nick? change when this is not the case anymore, i.e. when the interface can be set per page
b@413 229 add \texttt{interface="APE"} to the \texttt{setup} node, where \texttt{"APE"} is one of the interface names below.
b@413 230
b@413 231 \subsection{APE}
b@413 232 The APE interface is based on \cite{ape}, and consists of one or more axes, each corresponding with an attribute to be rated, on which markers are placed. As such, it is a multiple stimulus interface where (for each dimension or attribute) all elements are on one axis so that they can be maximally compared against each other, as opposed to rated individually or with regards to a single reference.
b@413 233 It also contains an optional text box for each element, to allow for clarification by the subject, tagging, and so on.
b@413 234
b@413 235 \subsection{MUSHRA}
b@413 236 This is a straightforward implementation of \cite{mushra}, especially common for the rating of audio quality, for instance for the evaluation of audio codecs.
b@413 237
b@378 238
b@413 239 \clearpage
b@413 240
b@413 241 \section{Features}
b@413 242
b@439 243 This section covers the different features implemented in the Web Audio Evaluation Tool, how to use them, and what to know about them.
b@413 244
b@413 245 Unless otherwise specified, \emph{each} feature described here is optional, i.e. it can be enabled or disabled and adjusted to some extent.
b@413 246
b@413 247 As the example project showcases (nearly) all of these features, please refer to its configuration XML document for a demonstration of how to enable and adjust them.
b@413 248
b@439 249 \subsection{Interface layout}
The \texttt{interface} node (child of \texttt{audioholder}) contains the elements that define a rating axis and its labels: a title, scale annotations, and a comment box prefix, as described below.
b@439 251
b@439 252 Example:
b@439 253
b@439 254 \begin{lstlisting}
b@439 255 <interface name="quality">
b@439 256 <title>Audio Quality</title>
b@439 257 <scale position="10">Poor</scale>
b@439 258 <scale position="90">Excellent</scale>
b@439 259 <commentBoxPrefix>Comment on fragment</commentBoxPrefix>
b@439 260 </interface>
b@439 261 \end{lstlisting}
b@439 262
b@439 263 \subsubsection{Title}
b@439 264 Specifies the axis title as displayed on the interface.
b@439 265
b@439 266 If this tag is absent, the title will default to `Axis \emph{[number]}'. Therefore, if no title is desired, just add the title tag (\texttt{<title/>}) without text.
b@439 267
b@439 268 \subsubsection{Annotation}
b@439 269 Words or numbers can be placed on specific positions of the scale with the \texttt{scale} tag. The \texttt{position} attribute is a value from 0 to 100, corresponding to the percentage of the width/height of the scale where you want the string to be placed.
b@439 270
b@439 271 \subsubsection{Comment box prefix}
b@439 272 If comment boxes corresponding with the fragments are enabled, this sets the comment box string after which the fragment number is appended.
b@439 273
The default value is ``Comment on fragment''. So in this case, each comment box would have a header ``Comment on fragment \emph{[number]}''.
b@439 275
b@439 276 \subsubsection{Multiple scales}
b@439 277 In the case of multiple rating scales, e.g. when the stimuli are to be rated in terms of attributes `timbre' and `spatial impression', multiple interface nodes will have to be added, each specifying the title and annotations.
b@439 278
b@439 279 This is where the \texttt{interface}'s \texttt{name} attribute is particularly important: use this to retrieve the rating values, comments and metrics associated with the specified interface.
b@439 280 If none is given, you can still use the automatically given \texttt{interface-id}, which is the interface number starting with 0 and corresponding to the order in which the rating scales appear.
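For example, a minimal sketch of two rating scales for the attributes mentioned above (the \texttt{name} values, titles and scale labels are placeholders):

\begin{lstlisting}
<interface name="timbre">
  <title>Timbre</title>
  <scale position="0">Poor</scale>
  <scale position="100">Excellent</scale>
</interface>
<interface name="spatial">
  <title>Spatial impression</title>
  <scale position="0">Poor</scale>
  <scale position="100">Excellent</scale>
</interface>
\end{lstlisting}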
b@439 281
b@439 282
b@413 283 \subsection{Surveys}
Surveys are conducted through an in-page popup window which can collect data using various HTML elements; see `Survey elements' below for a list. Survey questions are placed into the \texttt{<pretest>} or \texttt{<posttest>} nodes. Appending these nodes to the \texttt{<setup>} node will have the survey options appear before any test page (\texttt{<pretest>}) or after all test pages (\texttt{<posttest>}). Placing them in an \texttt{<audioholder>} node will have them appear before or after the test page they are a child of.
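As an illustration, a minimal sketch of where these survey nodes sit (all other attributes and content are elided; the survey contents are placeholders):

\begin{lstlisting}
<setup ...>
  <pretest>
    <statement>Welcome to this listening test.</statement>
  </pretest>
  <posttest>
    <question id="feedback">Any feedback on this test?</question>
  </posttest>
</setup>

<audioholder ...>
  <pretest>
    <statement>Instructions for this test page.</statement>
  </pretest>
</audioholder>
\end{lstlisting}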
b@413 285 \subsubsection{Survey elements}
b@413 286 All survey elements (which `pop up' in the centre of the browser) have an \texttt{id} attribute, for retrieval of the responses in post-processing of the results, and a \texttt{mandatory} attribute, which if set to ``true'' requires the subjects to respond before they can continue.
b@413 287
b@413 288 \begin{description}
b@413 289 \item[statement] Simply shows text to the subject until `Next' or `Start' is clicked.
\item[question] Expects a text answer (in a text box). Has the \texttt{boxsize} attribute: set to ``large'' or ``huge'' for a bigger box, or ``small'' for a small one.
\item[number] Only accepts a numerical value. The attribute \texttt{min="0"} specifies the minimum value - in this case the answer must not be negative before the subject can continue.
nicholas@434 292 \item[radio] Radio buttons. Presents a list of options to the user using radio buttons, where only one option from the list can be selected.
b@429 293 \item[checkbox] Checkboxes. Note that when making a checkbox question ``mandatory'', the subject is forced to select at least one option (which could be e.g. `Other' or `None').\\
b@413 294 \end{description}
b@413 295
b@413 296 \textbf{Example usage:}\\
b@413 297
b@413 298 \lstset{
b@413 299 basicstyle=\ttfamily,
b@413 300 columns=fullflexible,
b@413 301 showstringspaces=false,
b@413 302 commentstyle=\color{grey}\upshape
b@413 303 }
b@413 304
b@413 305 \lstdefinelanguage{XML}
b@413 306 {
b@413 307 morestring=[b]",
b@413 308 morestring=[s]{>}{<},
b@413 309 morecomment=[s]{<?}{?>},
b@413 310 stringstyle=\color{black} \bfseries,
b@413 311 identifierstyle=\color{darkblue} \bfseries,
b@413 312 keywordstyle=\color{cyan} \bfseries,
b@413 313 morekeywords={xmlns,version,type},
b@413 314 breaklines=true% list your attributes here
b@413 315 }
b@413 316 \scriptsize
b@413 317 \lstset{language=XML}
b@413 318
b@413 319 \begin{lstlisting}
b@413 320 <PostTest>
b@413 321 <question id="location" mandatory="true" boxsize="large">Please enter your location. (example mandatory text question)</question>
b@413 322 <number id="age" min="0">Please enter your age (example non-mandatory number question)</number>
b@413 323 <radio id="rating">
b@413 324 <statement>Please rate this interface (example radio button question)</statement>
b@413 325 <option name="bad">Bad</option>
b@413 326 <option name="poor">Poor</option>
b@413 327 <option name="good">Good</option>
b@413 328 <option name="great">Great</option>
b@413 329 </radio>
b@439 330 <checkbox id="background" mandatory="true">
b@429 331 <statement>Please select with which activities you have any experience (example checkbox question)</statement>
b@429 332 <option name="musician">Playing a musical instrument</option>
b@429 333 <option name="soundengineer">Recording or mixing audio</option>
b@429 334 </checkbox>
b@413 335 <statement>Thank you for taking this listening test. Please click 'Submit' and your results will appear in the 'saves/' folder.</statement>
b@413 336 </PostTest>
b@413 337 \end{lstlisting}
b@413 338
b@413 339
b@439 340
b@413 341 \subsection{Randomisation}
b@439 342 [WORK IN PROGRESS]
b@413 343
b@413 344 \subsubsection{Randomisation of configuration XML files}
The Python server has a special function to automatically cycle through a list of test pages. Instead of directly requesting an XML file, setting the \texttt{url} item in the browser URL to \texttt{pseudo.xml} will cycle through a list of XMLs. These XMLs must be in the local directory called \texttt{pseudo}.
b@413 346 % how to
b@413 347 % explain how this is implemented in the pythonServer
nicholas@434 348 %Nick? already implemented in the PHP?
nicholas@434 349 % Needs to be implemented in PHP and automated better, will complete soon
b@413 350
b@413 351
\subsubsection{Randomisation of page order}
nicholas@434 353 The page order randomisation is set by the \texttt{<setup>} node attribute \texttt{randomise-order}, for example \texttt{<setup ... randomise-order="true">...</setup>} will randomise the test page order. When not set, the default is to \textbf{not} randomise the test page order.
b@413 354
b@413 355 \subsubsection{Randomisation of axis order}
b@413 356
b@413 357 \subsubsection{Randomisation of fragment order}
The audio fragment randomisation is set by the \texttt{<audioholder>} node attribute \texttt{randomise-order}, for example \texttt{<audioholder ... randomise-order="true">...</audioholder>} will randomise the fragment order on that test page. When not set, the default is to \textbf{not} randomise the fragment order.
b@413 359
b@413 360 \subsubsection{Randomisation of initial slider position}
By default, slider values are randomised on start. The MUSHRA interface supports setting the initial values of all sliders through the \texttt{<audioholder>} attribute \texttt{initial-position}. This takes an integer between 0 and 100 to signify the slider position.
b@413 362 % /subsubsection{Randomisation of survey question order}
b@413 363 % should be an attribute of the individual 'pretest' and 'posttest' elements
b@413 364 % uncomment once we have it
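To summarise, a sketch of the randomisation attributes described above (all other attributes and content are elided; \texttt{initial-position} currently applies to the MUSHRA interface, and the value 50 is just an example):

\begin{lstlisting}
<setup ... randomise-order="true"> ... </setup>
<!-- randomises the order of the test pages -->
<audioholder ... randomise-order="true" initial-position="50"> ... </audioholder>
<!-- randomises the fragment order on this page and starts all sliders at position 50 -->
\end{lstlisting}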
b@413 365
b@413 366 \subsection{Looping}
nicholas@434 367 Looping enables the fragments to loop until stopped by the user. Looping is synchronous between samples so all samples start at the same time.
nicholas@434 368 Individual test pages can have their playback looped by the \texttt{<audioholder>} attribute \texttt{loop} with a value of "true" or "false".
b@413 369 If the fragments are not of equal length initially, they are padded with zeros so that they are equal length, to enable looping without the fragments going out of sync relative to each other.
b@413 370
Note that in looped mode fragments cannot be played until all fragments on the page are loaded, as the engine needs to know by how much to pad each fragment.
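For example (all other attributes and content are elided):

\begin{lstlisting}
<audioholder ... loop="true"> ... </audioholder>
\end{lstlisting}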
b@413 372
b@413 373 \subsection{Sample rate}
If you require the test to be conducted at a certain sample rate (i.e. you do not tolerate resampling of the elements to correspond with the system's sample rate), add \texttt{sampleRate="96000"} - where ``96000'' can be any supported sample rate - so that a warning message is shown alerting the subject when the system's sample rate differs from this enforced sample rate. This of course means that within one test all sample rates must be equal, as it is impossible to change the system's sample rate during the test (even if you were to change it manually, the browser would have to be restarted for it to take effect).
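For example, a sketch enforcing a 44.1 kHz sample rate, assuming the attribute is added to the \texttt{setup} node like the other test-wide settings (all other attributes and content are elided):

\begin{lstlisting}
<setup ... sampleRate="44100"> ... </setup>
\end{lstlisting}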
b@413 375
b@413 376 \subsection{Scrubber bar}
The scrubber bar, or transport bar (the visualisation of the playhead, with an indication of time and of the portion of the file played so far), is at this point purely visual, and not a controller to adjust the playhead position.
b@413 378
b@413 379 Make visible by adding \texttt{<option name='playhead'/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks).
b@413 380
b@413 381 \subsection{Metrics}
Enable the collection of metrics by adding \texttt{collectMetrics="true"} to the \texttt{setup} node. % Should this always be on??
b@413 383
nicholas@434 384 The \texttt{Metric} node, which contains the metrics to be tracked during the complete test, is a child of the \texttt{setup} node, and it could look as follows.
b@413 385
b@413 386 \begin{lstlisting}
b@413 387 <Metric>
b@413 388 <metricEnable>testTimer</metricEnable>
b@413 389 <metricEnable>elementTimer</metricEnable>
b@413 390 <metricEnable>elementInitialPosition</metricEnable>
b@413 391 <metricEnable>elementTracker</metricEnable>
b@413 392 <metricEnable>elementFlagListenedTo</metricEnable>
b@413 393 <metricEnable>elementFlagMoved</metricEnable>
b@413 394 <metricEnable>elementListenTracker</metricEnable>
b@413 395 </Metric>
b@413 396 \end{lstlisting}
b@413 397
b@413 398 When in doubt, err on the inclusive side, as one never knows which information is needed in the future. Most of these metrics are necessary for post-processing scripts such as timeline\_view\_movement.py.
b@413 399
b@413 400 \subsubsection{Time test duration}
b@413 401 \texttt{testTimer}\\
One per test page. Presents the total test time from the first playback on the test page to the submission of the test page (excluding time spent on the pre-/post-test surveys). This is presented in the results as \texttt{<metricresult id="testTime"> 8.60299319727892 </metricresult>}. The time is in seconds.
b@413 403
b@413 404 \subsubsection{Time fragment playback}
b@413 405 \texttt{elementTimer}\\
One per audio fragment per test page. This totals up the entire time the audio fragment has been listened to in this test, presented as \texttt{<metricresult name="enableElementTimer"> 1.0042630385487428 </metricresult>}. The time is in seconds.
b@413 407
b@413 408 \subsubsection{Initial positions}
b@413 409 \texttt{elementInitialPosition}\\
nicholas@434 410 One per audio fragment per test page. Tracks the initial position of the sliders, especially relevant when these are randomised. Example result \texttt{<metricresult name="elementInitialPosition"> 0.8395522388059702 </metricresult>}.
b@413 411
b@413 412 \subsubsection{Track movements}
nicholas@434 413 \texttt{elementTracker}\\
One per audio fragment per test page. Tracks the movement of each interface object. Each movement event has the time at which it occurred and the new value.
nicholas@434 415 \subsubsection{Which fragments listened to}
nicholas@434 416 \texttt{elementFlagListenedTo}\\
nicholas@434 417 One per audio fragment per test page. Boolean response, set to true if listened to.
nicholas@434 418 \subsubsection{Which fragments moved}
nicholas@434 419 \texttt{elementFlagMoved}\\
One per audio fragment per test page. Binary check of whether or not the marker corresponding with a particular fragment was moved at all throughout the experiment.
b@413 421
\subsubsection{Track playback events}
\texttt{elementListenTracker}\\
One per audio fragment per test page. Tracks the playback events of each audio element, pairing the time in the test when playback started with the time it stopped; it also gives the buffer time positions.
b@413 425
b@413 426 \subsection{References and anchors}
The audio elements, \texttt{<audioelement>}, have the attribute \texttt{type}, which defaults to `normal'. Setting it to one of the following values has the effects described below.
\subsubsection{Outside reference}
Set type to 'outside-reference'. This will place the object in a separate playback element clearly labelled as an outside reference. It is exempt from any movement checks but will still be included in any listening checks.
\subsubsection{Hidden reference}
Set type to 'reference'. The element will still be randomised as normal (if selected) and presented to the user. However, the element will have the 'reference' type in the results so it can be found quickly. The reference can be forced to be rated above a certain value before completing the test page by setting the attribute 'marker' to a value between 0 and 100 representing the integer position it must be equal to or above.
\subsubsection{Hidden anchor}
Set type to 'anchor'. The element will still be randomised as normal (if selected) and presented to the user. However, the element will have the 'anchor' type in the results so it can be found quickly. The anchor can be forced to be rated below a certain value before completing the test page by setting the attribute 'marker' to a value between 0 and 100 representing the integer position it must be equal to or below.
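For example, sketches of the three special types (the \texttt{url}, \texttt{id} and \texttt{marker} values are placeholders; other attributes are elided):

\begin{lstlisting}
<audioelement url="reference.wav" id="outside-ref" type="outside-reference"/>
<audioelement url="hidden-reference.wav" id="hidden-ref" type="reference" marker="80"/>
<audioelement url="anchor.wav" id="hidden-anchor" type="anchor" marker="20"/>
\end{lstlisting}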
b@413 434
b@413 435 \subsection{Checks}
b@413 436 \label{sec:checks}
b@413 437
b@413 438 %blabla
b@413 439 These checks are enabled in the \texttt{interface} node, which is a child of the \texttt{setup} node.
b@413 440 \subsubsection{Playback checks}
b@413 441 % what it does/is
Enforce playing each sample at least once, for at least a little bit (i.e. this check is satisfied even if you only play a tiny portion of the file), by alerting the user to which samples have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test before playing each sample at least once.
b@413 443 % how to enable/disable
b@413 444
b@413 445 Alternatively, one can check whether the \emph{entire} fragment was listened to at least once.
b@413 446 % how to enable
b@413 447
b@413 448 Add \texttt{<check name="fragmentPlayed"/>} to the \texttt{interface} node.
b@413 449
b@413 450
b@413 451 \subsubsection{Movement check}
Enforce moving each marker at least once, by alerting the user to which samples have not been moved upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test before moving the marker of each sample at least once.
b@413 453 If there are several axes, the warning will specify which samples have to be moved on which axis.
b@413 454
b@413 455 Add \texttt{<check name="fragmentMoved"/>} to the \texttt{interface} node.
b@413 456
b@413 457 \subsubsection{Comment check}
b@413 458 % How to enable/disable?
b@413 459
b@413 460 Enforce commenting, by alerting the user to which samples have not been commented on upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before putting at least one character in each comment box.
b@413 461
b@413 462 Note that this does not apply to any extra (text, radio button, checkbox) elements, unless these have the `mandatory' option enabled. %Nick? is this extra 'mandatory' option implemented?
b@413 463
b@413 464 Add \texttt{<check name="fragmentComments"/>} to the \texttt{interface} node.
b@413 465
b@413 466 %ADD: how to add a custom comment box
b@413 467
b@413 468 \subsubsection{Scale use check}
It is possible to enforce a certain usage of the scale, meaning that at least one marker needs to be placed below and/or above a certain percentage of the scale.
b@413 470
b@413 471 Add \texttt{<check name="scalerange" min="25" max="75"/>} to the \texttt{interface} node.
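For reference, a setup-level \texttt{interface} node enabling all of the checks above might look as follows (a sketch; include only the checks you need, and adjust the \texttt{scalerange} bounds):

\begin{lstlisting}
<interface>
  <check name="fragmentPlayed"/>
  <check name="fragmentMoved"/>
  <check name="fragmentComments"/>
  <check name="scalerange" min="25" max="75"/>
</interface>
\end{lstlisting}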
b@413 472
b@413 473 \subsubsection{Note on the use of multiple rating axes}
b@413 474 I.e. what if more than one axis? How to specify which axis the checks relate to? %Nick? to add?
b@413 475
b@413 476 \subsection{Platform information}
b@413 477 % what does it do, what does it look like
b@413 478 % limitations?
For troubleshooting and usage statistics purposes, information about the browser and the operating system is logged in the results XML file. This is especially useful in the case of remote tests, when it is not certain which operating system and/or browser was used. Note that this information is not always available and/or accurate, e.g. when the subject has taken steps to be more anonymous, so it should be treated as a guide only.
b@439 480
b@439 481 Example:
b@439 482 \begin{lstlisting}
b@439 483 <navigator>
b@439 484 <platform>MacIntel</platform>
b@439 485 <vendor>Google Inc.</vendor>
b@439 486 <uagent>Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36</uagent>
b@439 487 </navigator>
b@439 488 \end{lstlisting}
b@413 489
b@413 490 \subsection{Show progress}
b@413 491 Add \texttt{<option name="page-count"/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks) to add the current page number and the total number of pages to the interface.
b@413 492
b@413 493 \subsection{Gain}
b@413 494 It is possible to set the gain (in decibel) applied to the different audioelements, as an attribute of the \texttt{audioelement} nodes in the configuration XML file:
b@413 495
nicholas@434 496 \texttt{<audioElements url="sample-01.wav" gain="-6" id="sample01quieter" />}\\
Please note that there are no checks to detect whether the gain was accidentally specified as a linear value rather than in decibels.
b@413 498
b@413 499 \subsection{Loudness}
b@413 500 % automatic loudness equalisation
b@413 501 % guide to loudness.js
Each audio fragment has its loudness calculated on loading. The tool uses the EBU R 128 recommendation, following the ITU-R BS.1770-4 loudness calculation, to return the integrated loudness in LUFS. The attribute \texttt{loudness} sets the loudness for the scope it is applied in: applying it to the \texttt{<setup>} node sets the loudness for all test pages, applying it to an \texttt{<audioholder>} node sets the loudness for that page, and applying it to an \texttt{<audioelement>} node sets the loudness for that fragment. The scope is resolved locally, so if there is a loudness value on both the \texttt{<audioholder>} and \texttt{<setup>} nodes, that test page will take the value associated with the \texttt{<audioholder>}. The loudness attribute is specified in LUFS.
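For example, sketches of the three scopes (the LUFS values are placeholders; all other attributes and content are elided):

\begin{lstlisting}
<setup ... loudness="-23"> ... </setup>
<!-- default target loudness for all test pages -->
<audioholder ... loudness="-20"> ... </audioholder>
<!-- overrides the setup value for this page -->
<audioelement ... loudness="-18"/>
<!-- overrides both for this fragment -->
\end{lstlisting}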
b@413 503
b@413 504 \clearpage
b@413 505
b@378 506
b@378 507 \section{Using the test create tool}
We provide a test creation tool, available in the directory test\_create. This tool is a self-contained web page, so double-clicking it will launch the page in your system's default browser.
b@378 509
b@378 510 The test creation tool can help you build a simple test very quickly. By simply selecting your interface and clicking check-boxes you can build a test in minutes.
b@378 511
b@378 512 Include audio by dragging and dropping the stimuli you wish to include.
b@378 513
b@378 514 The tool examines your XML before exporting to ensure you do not export an invalid XML structure which would crash the test.
b@378 515
\section{Building your own interface}

This guide will help you construct your own interface on top of the WAET (Web Audio Evaluation Tool) engine. The WAET engine resides in the \texttt{core.js} file, which contains prototype objects to handle most of the test creation, operation and data collection. The interface simply has to link into this at the correct points.
b@378 519
b@378 520 \subsection{Nodes to familiarise}
Core.js handles several very important nodes which you should become familiar with. The first is the Audio Engine, initialised and stored in the variable `AudioEngineContext'. This handles the playback of the web audio nodes as well as storing the `AudioObjects'. The `AudioObjects' are custom nodes which hold the audio fragments for playback. These nodes also link to two interface objects: the comment box (if enabled) and the interface element providing the ranking. On creation of an `AudioObject' the interface link will be null; it is up to the interface to link these correctly.
b@378 522
The specification document will be decoded and parsed into an object called `specification'. This will hold all of the specification's various nodes. The test pages and any pre/post test objects are processed by a test state which will proceed through the test when called to by the interface. Any checks (such as playback or movement checks) are to be completed by the interface before instructing the test state to proceed. The test state will call the interface on each page load with the page specification node.
b@378 524
b@378 525 \subsection{Modifying \texttt{core.js}}
Whilst there is very little code actually needed, you do need to instruct core.js to load your interface file when it is called for by a specification node. There is a function called `loadProjectSpecCallback' which handles the decoding of the specification and setting any external items (such as metric collection). At the very end of this function there is an if statement; add your interface string to this list to link to your source file. There is an example in there for both the APE and MUSHRA tests already included. Note: any updates to core.js in future work will most likely overwrite your changes to this file, so remember to check that your interface is still there after any update that interferes with core.js.
Any further files can be loaded here as well, such as CSS styling files. jQuery is already included.
b@378 528
b@378 529 \subsection{Building the Interface}
b@378 530 Your interface file will get loaded automatically when the `interface' attribute of the setup node matches the string in the `loadProjectSpecCallback' function. The following functions must be defined in your interface file.
b@378 531 \begin{itemize}
b@378 532 \item \texttt{loadInterface} - Called once when the document is parsed. This creates any necessary bindings, such as to the metric collection classes and any check commands. Here you can also start the structure for your test such as placing in any common nodes (such as the title and empty divs to drop content into later).
b@378 533 \item \texttt{loadTest(audioHolderObject)} - Called for each page load. The audioHolderObject contains a specification node holding effectively one of the audioHolder nodes.
\item \texttt{resizeWindow(event)} - Handle for any window resizing. Simply scale your interface accordingly. This function must be present, but can be an empty function.
b@378 535 \end{itemize}
b@378 536
b@378 537 \subsubsection{loadInterface}
b@378 538 This function is called by the interface once the document has been parsed since some browsers may parse files asynchronously. The best method is simply to put `loadInterface()' at the top of your interface file, therefore when the JavaScript engine is ready the function is called.
b@378 539
b@378 540 By default the HTML file has an element with id ``topLevelBody'' where you can build your interface. Make sure you blank the contents of that object. This function is the perfect time to build any fixed items, such as the page title, session titles, interface buttons (Start, Stop, Submit) and any holding and structure elements for later on.
b@378 541
At the end of the function, insert these two function calls: \texttt{testState.initialise()} and \texttt{testState.advanceState()}. This will actually begin the test sequence, including the pre-test options (if any are included in the specification document).
b@378 543
b@378 544 \subsubsection{loadTest(audioHolderObject)}
This function is called on each new test page. It is this function's job to clear out the previous test and set up the new page. Use the function \texttt{audioEngineContext.newTestPage()} to instruct the audio engine to prepare for a new page. \texttt{audioEngineContext.audioObjects = []} will delete any audioObjects, \texttt{interfaceContext.deleteCommentBoxes()} will delete any comment boxes and \texttt{interfaceContext.deleteCommentQuestions()} will delete any extra comment boxes specified by commentQuestion nodes.
b@378 546
This function will need to instruct the audio engine to build each fragment. Simply passing each element from the audioHolderObject to the constructor will build the track: \texttt{audioEngineContext.newTrack(element)} (where element is the audioHolderObject audio element). This will return a reference to the constructed audioObject. Decoding of the audio will happen asynchronously.
b@378 548
You also need to link audioObject.interfaceDOM with your interface object for that audioObject. The interfaceDOM object has a few required methods. Firstly, it must start disabled and become enabled once the audioObject has decoded the audio (function call: \texttt{enable()}). Next, it must have a function \texttt{exportXMLDOM()}, which returns the XML node for your interface; the default is for it to return a value node with textContent equal to the normalised value. You can perform other functions, but our scripts may not work if something different is specified (as it would breach our results specification). Finally, it must also have a method \texttt{getValue}, which returns the normalised value.
b@378 550
It is also the job of the interfaceDOM to call any metric collection functions necessary; however, some functions may be better placed outside (for example, the APE interface uses drag and drop, so the best approach was to call the metric functions from the dragEnd function, which is called when the interface object is dropped). Metrics based upon listening are handled by the audioObject. The interfaceDOM object must manage any movement metrics. For a list of valid metrics and their behaviours, see the project specification document included in the repository's docs location. The same goes for any checks required when pressing the submit button, or any other method of advancing the test state.
b@378 552
b@413 553 \clearpage
b@413 554 \section{Analysis and diagnostics}
b@413 555 \subsection{In the browser}
See `analyse.html' in the main folder: immediate visualisation of (by default) all results in the `saves/' folder.
b@413 557
b@413 558 \subsection{Python scripts}
b@413 559 The package includes Python (2.7) scripts (in `scripts/') to extract ratings and comments, generate visualisations of ratings and timelines, and produce a fully fledged report.
b@413 560
Visualisation requires the free matplotlib toolbox (\url{http://matplotlib.org}), numpy and scipy.
b@413 562 By default, the scripts can be run from the `scripts' folder, with the result files in the `saves' folder (the default location where result XMLs are stored). Each script takes the XML file folder as an argument, along with other arguments in some cases.
b@413 563 Note: to avoid all kinds of problems, please avoid using spaces in file and folder names (this may work on some systems, but others don't like it).
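For example, to extract the ratings from the result files in the `saves/' folder, assuming you run the script from within the `scripts/' folder (adjust the path to wherever your result XML files are stored), type

\texttt{python score\_parser.py ../saves/}

and hit enter. The other scripts are invoked in the same way.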
b@413 564
b@413 565 \subsubsection{comment\_parser.py}
b@413 566 Extracts comments from the output XML files corresponding with the different subjects found in `saves/'. It creates a folder per `audioholder'/page it finds, and stores a CSV file with comments for every `audioelement'/fragment within these respective `audioholders'/pages. In this CSV file, every line corresponds with a subject/output XML file. Depending on the settings, the first column containing the name of the corresponding XML file can be omitted (for anonymisation).
b@413 567 Beware of Excel: sometimes the UTF-8 is not properly imported, leading to problems with special characters in the comments (particularly cumbersome for foreign languages).
b@413 568
b@413 569 \subsubsection{evaluation\_stats.py}
b@413 570 Shows a few statistics of tests in the `saves/' folder so far, mainly for checking for errors. Shows the number of files that are there, the audioholder IDs that were tested (and how many of each separate ID), the duration of each page, the duration of each complete test, the average duration per page, and the average duration in function of the page number.
b@413 571
b@413 572 \subsubsection{generate\_report.py}
Similar to `evaluation\_stats.py', but generates a PDF report based on the output files in the `saves/' folder - or any folder specified as a command line argument. Uses pdflatex to write a LaTeX document, then converts it to a PDF.
b@413 574
b@413 575 \subsubsection{score\_parser.py}
b@413 576 Extracts rating values from the XML to CSV - necessary for running visualisation of ratings. Creates the folder `saves/ratings/' if not yet created, to which it writes a separate file for every `audioholder'/page in any of the output XMLs it finds in `saves/'. Within each file, rows represent different subjects (output XML file names) and columns represent different `audioelements'/fragments.
b@413 577
b@413 578 \subsubsection{score\_plot.py}
Plots the ratings as stored in the CSVs created by score\_parser.py.
b@413 580 Depending on the settings, it displays and/or saves (in `saves/ratings/') a boxplot, confidence interval plot, scatter plot, or a combination of the aforementioned.
b@413 581 Requires the free matplotlib library.
At this point, more than one subject is needed for this script to work.
b@413 583
b@413 584 \subsubsection{timeline\_view\_movement.py}
b@413 585 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows the marker movements of the different fragments, along with when each fragment was played (red regions). Automatically takes fragment names, rating axis title, rating axis labels, and audioholder name from the XML file (if available).
b@413 586
b@413 587 \subsubsection{timeline\_view.py} % should be omitted or absorbed by the above soon
b@413 588 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows when and for how long the subject listened to each of the fragments.
b@413 589
b@378 590
b@378 591
b@378 592 \clearpage
b@378 593 \section{Troubleshooting} \label{sec:troubleshooting}
b@413 594 \subsection{Reporting bugs and requesting features}
b@413 595 Thanks to feedback from using the interface in experiments by the authors and others, many bugs have been caught and fatal crashes due to the interface seem to be a thing of the past entirely.
b@378 596
b@413 597 We continually develop this tool to fix issues and implement features useful to us or our user base. See \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/issues} for a list of feature requests and bug reports, and their status.
b@378 598
b@413 599 Please contact the authors if you experience any bugs, if you would like additional functionality, if you spot any errors or gaps in the documentation, if you have questions about using the interface, or if you would like to give any feedback (even positive!) about the interface. We look forward to learning how the tool has (not) been useful to you.
b@378 600
b@378 601
b@413 602 \subsection{First aid}
Meanwhile, if things do go wrong or the test needs to be interrupted for whatever reason, not all data is lost. In a normal scenario, the test needs to be completed until the end (the final `Submit'), at which point the output XML is stored in the \texttt{saves/} folder. If this stage is not reached, open the JavaScript Console (see below for how to find it) and type
b@378 604
b@413 605 \texttt{createProjectSave()}
b@378 606
b@413 607 to present the result XML file on the client side, or
b@378 608
b@413 609 \texttt{createProjectSave(specification.projectReturn)}
b@378 610
b@413 611 to try to store it to the specified location, e.g. the `saves/' folder on the web server or the local machine (on failure the result XML should be presented directly in the web browser instead)
b@378 612
b@413 613 and hit enter. This will open a pop-up window with a hyperlink that reads `Save File'; click it and an XML file with results until that point should be stored in your download folder.
b@413 614
b@413 615 Alternatively, a lot of data can be read from the same console, in which the tool prints a lot of debug information. Specifically:
b@413 616 \begin{itemize}
b@413 617 \item the randomisation of pages and fragments are logged;
b@413 618 \item any time a slider is played, its ID and the time stamp (in seconds since the start of the test) are displayed;
b@413 619 \item any time a slider is dragged and dropped, the location where it is dropped including the time stamp are shown;
b@413 620 \item any comments and pre- or post-test questions and their answers are logged as well.
b@413 621 \end{itemize}
b@378 622
You can select all of this and save it into a text file, so that none of this data is lost. You may choose to do this even when a test was successful, as an extra precaution.
b@378 624
b@413 625 If you encounter any issue which you believe to be caused by any aspect of the tool, and/or which the documentation does not mention, please do let us know!
b@378 626
b@413 627 \subsubsection*{Opening the JavaScript Console}
b@413 628 \begin{itemize}
b@413 629 \item In Google Chrome, the JavaScript Console can be found in \textbf{View$>$Developer$>$JavaScript Console}, or via the keyboard shortcut Cmd + Alt + J (Mac OS X).
b@413 630 \item In Safari, the JavaScript Console can be found in \textbf{Develop$>$Show Error Console}, or via the keyboard shortcut Cmd + Alt + C (Mac OS X). Note that for the Developer menu to be visible, you have to go to Preferences (Cmd + ,) and enable `Show Develop menu in menu bar' in the `Advanced' tab. \textbf{Note that as long as the Developer menu is not visible, nothing is logged to the console, i.e. you will only be able to see diagnostic information from when you switched on the Developer tools onwards.}
b@413 631 \item In Firefox, go to \textbf{Tools$>$Web Developer$>$Web Console}, or hit Cmd + Alt + K.
b@413 632 \end{itemize}
b@378 633
b@413 634 \subsection{Known issues and limitations}
b@413 635 \label{sec:knownissues}
b@413 636
b@413 637 The following is a non-exhaustive list of problems and limitations you may experience using this tool, due to not being supported yet by us, or by the Web Audio API and/or (some) browsers.
b@413 638
b@413 639 \begin{itemize}
b@413 640 \item Issue \href{https://code.soundsoftware.ac.uk/issues/1463}{\textbf{\#1463}}: \textbf{Firefox} only supports 8 bit and 16 bit WAV files. Pending automatic requantisation (which deteriorates the audio signal's dynamic range to some extent), WAV format stimuli need to adhere to these limitations in order for the test to be compatible with Firefox.
b@413 641 \item Issues \href{https://code.soundsoftware.ac.uk/issues/1474}{\textbf{\#1474}} and \href{https://code.soundsoftware.ac.uk/issues/1462}{\textbf{\#1462}}: On occasions, audio is not working - or only a continuous `beep' can be heard - notably in \textbf{Safari}. Refreshing, quitting the browser and even enabling Developer tools in Safari's Preferences pane (`Advanced' tab: ``Show `Develop' menu in menu bar'') has helped resolve this. If no (high quality) audio can be heard, make sure your entire playback system's settings are all correct.
b@413 642 \end{itemize}
b@378 643
b@378 644 \clearpage
b@378 645 \bibliographystyle{ieeetr}
b@378 646 \bibliography{Instructions}{}
b@378 647
b@378 648
b@378 649 \clearpage
b@378 650 \appendix
b@378 651
b@413 652 \section{Legacy}
b@413 653 The APE interface and most of the functionality of the first WAET editions are inspired by the APE toolbox for MATLAB \cite{ape}. See \url{https://code.soundsoftware.ac.uk/projects/ape} for the source code and \url{http://brechtdeman.com/publications/aes136.pdf} for the corresponding paper.
b@413 654
b@413 655 \clearpage
b@413 656
b@378 657 \section{Listening test instructions example}
b@378 658
b@378 659 Before each test, show the instructions below or similar and make sure it is available to the subject throughout the test. Make sure to ask whether the participant has any questions upon seeing and/or reading the instructions.
b@378 660
b@378 661 \begin{itemize}
b@378 662 \item You will be asked for your name (``John Smith'') and location (room identifier).
b@378 663 \item An interface will appear, where you are asked to
b@378 664 \begin{itemize}
b@378 665 \item click green markers to play the different mixes;
b@378 666 \item drag the markers on a scale to reflect your preference for the mixes;
b@378 667 \item comment on these mixes, using text boxes with corresponding numbers (in your \textbf{native language});
b@378 668 \item optionally comment on all mixes together, or on the song, in `General comments'.
b@378 669 \end{itemize}
\item You are asked for your personal, honest opinion. Feel free to use the full range of the scale to convey your opinion of the various mixes. Don't be afraid to be harsh and direct.
b@378 671 \item The markers appear at random positions at first (which means some markers may hide behind others).
b@378 672 \item The interface can take a few seconds to start playback, but switching between mixes should be instantaneous.
b@378 673 \item This is a research experiment, so please forgive us if things go wrong. Let us know immediately and we will fix it or restart the test.
b@378 674 \item When the test is finished (after all songs have been evaluated), just call the experimenter, do NOT close the window.
b@378 675 \item After the test, please fill out our survey about your background, experience and feedback on the test.
b@378 676 \item By participating, you consent to us using all collected data for research. Unless asked explicitly, all data will be anonymised when shared.
b@378 677 \end{itemize}
b@378 678
b@378 679 \clearpage
b@378 680
b@429 681 \section{Terminology} % just to keep track of what exactly we call things. Don't use terms that are too different, to avoid confusion.
b@429 682 As a guide to better understand the Instructions, and to expand them later, here is a list of terms that may be unclear or ambiguous unless properly defined.
b@413 683 \begin{description}
b@413 684 \item[Subject] The word we use for a participant, user, ... of the test, i.e. not the experimenter who designs the test but the person who evaluates the audio under test as part of an experiment (or the preparation of one).
\item[User] The person who uses the tool to configure, run and analyse the test - i.e. the experimenter, most likely a researcher - or at least the person administering the test.
b@413 686 \item[Page] A screen in a test; corresponds with an \texttt{audioholder}
b@439 687 \item[Fragment] An element, stimulus or sample in a test; corresponds with an \texttt{audioelement}
b@413 688 \item[Test] A complete test which can consist of several pages; corresponds with an entire configuration XML file
b@413 689 \item[Configuration XML file] The XML file containing the necessary information on interface, samples, survey questions, configurations, ... which the JavaScript modules read to produce the desired test.
b@413 690 \item[Results XML file] The output of a successful test, including ratings, comments, survey responses, timing information, and the complete configuration XML file with which the test was generated in the first place.
b@413 691 \end{description}
b@413 692
b@413 693 \clearpage
b@413 694
b@413 695 \setcounter{secnumdepth}{0} % don't number this last bit
b@413 696 \section{Contact details} % maybe add web pages, Twitter accounts, whatever you like
b@378 697 \label{sec:contact}
b@378 698
b@378 699 \begin{itemize}
b@378 700 \item Nicholas Jillings: \texttt{nicholas.jillings@mail.bcu.ac.uk}
b@378 701 \item Brecht De Man: \texttt{b.deman@qmul.ac.uk}
b@378 702 \item David Moffat: \texttt{d.j.moffat@qmul.ac.uk}
b@378 703 \end{itemize}
b@378 704
b@378 705 \end{document}