annotate docs/Instructions/Instructions.tex @ 1121:595511282fa7

Removed Schema document and xmllint for validation. Schema not possible for <survey> ordering of any element any order any number type.
author Nicholas Jillings <n.g.r.jillings@se14.qmul.ac.uk>
date Tue, 29 Dec 2015 11:39:46 +0000
parents 3edcbbea168b
children fb062819d956
rev   line source
n@1118 1 \documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
n@1118 2 \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
n@1118 3 \geometry{letterpaper} % ... or a4paper or a5paper or ...
n@1118 4 %\geometry{landscape} % Activate for rotated page geometry
n@1118 5 \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
n@1118 6 \usepackage{graphicx} % Use pdf, png, jpg, or eps§ with pdflatex; use eps in DVI mode
n@1118 7 % TeX will automatically convert eps --> pdf in pdflatex
n@1118 8
n@1118 9 \usepackage{listings} % Source code
n@1118 10 \usepackage{xcolor} % colour (source code for instance)
n@1118 11 \definecolor{grey}{rgb}{0.1,0.1,0.1}
n@1118 12 \definecolor{darkblue}{rgb}{0.0,0.0,0.6}
n@1118 13 \definecolor{cyan}{rgb}{0.0,0.6,0.6}
n@1118 14
n@1118 15 \usepackage{amssymb}
n@1118 16 \usepackage{cite}
n@1118 17 \usepackage{hyperref} % Hyperlinks
n@1118 18 \usepackage[nottoc,numbib]{tocbibind} % 'References' in TOC
n@1118 19
n@1118 20 \graphicspath{{img/}} % Relative path where the images are stored.
n@1118 21
n@1118 22 \title{Instructions for \\ Web Audio Evaluation Tool}
n@1118 23 \author{Nicholas Jillings, Brecht De Man and David Moffat}
n@1118 24 \date{7 December 2015} % Activate to display a given date or no date
n@1118 25
n@1118 26 \begin{document}
n@1118 27 \maketitle
n@1118 28
n@1118 29 These instructions are about use of the Web Audio Evaluation Tool on Windows and Mac OS X platforms.
n@1118 30
n@1118 31 We request that you acknowledge the authors and cite our work when using it \cite{waet}, see also CITING.txt.
n@1118 32
n@1118 33 The tool is available in its entirety including source code on \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/}, under the GNU General Public License v3.0 (\url{http://choosealicense.com/licenses/gpl-3.0/}), see also LICENSE.txt.
n@1118 34
n@1118 35 % TO DO: Linux (Android, iOS)
n@1118 36
n@1118 37 \tableofcontents
n@1118 38
n@1118 39 \clearpage
n@1118 40
n@1118 41 \section{Installation}
n@1118 42 Download the folder (\url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool/archive/tip.zip}) and unzip in a location of your choice, or pull the source code from \url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool} (Mercurial).
n@1118 43
n@1118 44 \subsection{Contents}
n@1118 45 The folder should contain the following elements: \\
n@1118 46
n@1118 47 \textbf{Main folder:}
n@1118 48 \begin{itemize}
n@1118 49 \item \texttt{analyse.html}: analysis and diagnostics of a set of result XML files
n@1118 50 \item \texttt{ape.css, core.css, graphics.css, mushra.css, structure.css}: style files (edit to change appearance)
n@1118 51 \item \texttt{ape.js}: JavaScript file for APE-style interface \cite{ape}
n@1118 52 \item \texttt{CITING.txt, LICENSE.txt, README.txt}: text files with, respectively, the citation which we ask to include in any work where this tool or any portion thereof is used, modified or otherwise; the license under which the software is shared; and a general readme file referring to these instructions.
n@1118 53 \item \texttt{core.js}: JavaScript file with core functionality
n@1118 54 \item \texttt{index.html}: webpage where interface should appear (includes link to test configuration XML)
n@1118 55 \item \texttt{jquery-2.1.4.js}: jQuery JavaScript Library
n@1118 56 \item \texttt{loudness.js}: Allows for automatic calculation of loudness of Web Audio API Buffer objects, return gain values to correct for a target loudness or match loudness between multiple objects
n@1118 57 \item \texttt{mushra.js}: JavaScript file for MUSHRA-style interface \cite{mushra}
n@1118 58 \item \texttt{pythonServer.py}: webserver for running tests locally
n@1118 59 \item \texttt{pythonServer-legacy.py}: webserver with limited functionality (no automatic storing of output XML files)
n@1118 60 \item \texttt{save.php}: PHP script to store result XML files to web server\\
n@1118 61 \end{itemize}
n@1118 62 \textbf{Documentation (./docs/)}
n@1118 63 \begin{itemize}
n@1118 64 \item \href{http://c4dm.eecs.qmul.ac.uk/dmrn/events/dmrnp10/#posters}{DMRN+10}: PDF and \LaTeX source of poster for 10\textsuperscript{th} Digital Music Research Network One-Day workshop (``soft launch'')
n@1118 65 \item Instructions: PDF and \LaTeX source of these instructions
n@1118 66 \item Project Specification Document (\LaTeX/PDF)
n@1118 67 \item Results Specification Document (\LaTeX/PDF)
n@1118 68 \item SMC15: PDF and \LaTeX source of 12th Sound and Music Computing Conference paper \cite{waet}
n@1118 69 \item WAC2016: PDF and \LaTeX source of 2nd Web Audio Conference paper\\
n@1118 70 \end{itemize}
n@1118 71 \textbf{Example project (./example\_eval/)}
n@1118 72 \begin{itemize}
n@1118 73 \item An example of what the set up XML should look like, with example audio files 0.wav-10.wav which are short recordings at 44.1kHz, 16bit of a woman saying the corresponding number (useful for testing randomisation and general familiarisation with the interface).\\
n@1118 74 \end{itemize}
n@1118 75 \textbf{Output files (./saves/)}
n@1118 76 \begin{itemize}
n@1118 77 \item The output XML files of tests will be stored here by default by the \texttt{pythonServer.py} script.\\
n@1118 78 \end{itemize}
n@1118 79 \textbf{Auxiliary scripts (./scripts/)}
n@1118 80 \begin{itemize}
n@1118 81 \item Helpful Python scripts for extraction and visualisation of data.\\
n@1118 82 \end{itemize}
n@1118 83 \textbf{Test creation tool (./test\_create/)}
n@1118 84 \begin{itemize}
n@1118 85 \item Webpage for easily setting up your own test without having to delve into the XML.\\
n@1118 86 \end{itemize}
n@1118 87
n@1118 88 \subsection{Compatibility}
n@1118 89 As Microsoft Internet Explorer doesn't support the Web Audio API\footnote{\url{http://caniuse.com/\#feat=audio-api}}, you will need another browser like Google Chrome, Safari or Firefox (all three are tested and confirmed to work).
n@1118 90
n@1118 91 Firefox does not currently support other bit depths than 8 or 16 bit for PCM wave files. In the future, this will throw a warning message to tell the user that their content is being quantised automatically. %Nick? Right? To be removed if and when actually implemented
n@1118 92
n@1118 93 The tool is platform-independent and works in any browser that supports the Web Audio API. It does not require any specific, proprietary software. However, in case the tool is hosted locally (i.e. you are not hosting it on an actual webserver) you will need Python (2.7), which is a free programming language - see the next paragraph.
n@1118 94
n@1118 95 \clearpage
n@1118 96
n@1118 97
n@1118 98 \section{Test setup}
n@1118 99
n@1118 100 \subsection{Sample rate}
n@1118 101 Depending on how the experiment is set up, audio is resampled automatically (the Web Audio default) or the sample rate is enforced. In the latter case, you will need to make sure that the sample rate of the system is equal to the sample rate of these audio files. For this reason, all audio files in the experiment will have to have the same sample rate.
n@1118 102
n@1118 103 Always make sure that all other digital equipment in the playback chain (clock, audio interface, digital-to-analog converter, ...) is set to this same sample rate.
n@1118 104
n@1118 105 Note that upon changing the sampling rate, the browser will have to be restarted for the change to take effect.
n@1118 106
n@1118 107 \subsubsection{Mac OS X}
n@1118 108 To change the sample rate in Mac OS X, go to \textbf{Applications/Utilities/Audio MIDI Setup} or find this application with Spotlight (see Figure \ref{fig:audiomidisetup}). Then select the output of the audio interface you are using and change the `Format' to the appropriate number. Also make sure the bit depth and channel count are as desired.
n@1118 109 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
n@1118 110
n@1118 111 Also make sure left and right channel gains are equal, as some applications alter this without changing it back, leading to a predominantly louder left or right channel. See Figure \ref{fig:audiomidisetup} for an example where the channel gains are different.
n@1118 112
n@1118 113 \begin{figure}[tb]
n@1118 114 \centering
n@1118 115 \includegraphics[width=.65\textwidth]{img/audiomidisetup.png}
n@1118 116 \caption{The Audio MIDI Setup window in Mac OS X}
n@1118 117 \label{fig:audiomidisetup}
n@1118 118 \end{figure}
n@1118 119
n@1118 120 \subsubsection{Windows}
n@1118 121 To change the sample rate in Windows, right-click on the speaker icon in the lower-right corner of your desktop and choose `Playback devices'. Right-click the appropriate playback device and click `Properties'. Click the `Advanced' tab and verify or change the sample rate under `Default Format'. % NEEDS CONFIRMATION
n@1118 122 If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
n@1118 123
n@1118 124 \subsection{Local test}
n@1118 125 If the test is hosted locally, you will need to run the local webserver provided with this tool.
n@1118 126
n@1118 127 \subsubsection{Mac OS X \& Linux}
n@1118 128
n@1118 129 On Mac OS X, Python comes preinstalled, as with most Unix/Linux distributions.
n@1118 130
n@1118 131 Open the Terminal (find it in \textbf{Applications/Terminal} or via Spotlight), and go to the folder you downloaded. To do this, type \texttt{cd [folder]}, where \texttt{[folder]} is the folder where to find the \texttt{pythonServer.py} script you downloaded. For instance, if the location is \texttt{/Users/John/Documents/test/}, then type
n@1118 132
n@1118 133 \texttt{cd /Users/John/Documents/test/}
n@1118 134
n@1118 135 Then hit enter and run the Python script by typing
n@1118 136
n@1118 137 \texttt{python pythonServer.py}
n@1118 138
n@1118 139 and hit enter again. See also Figure \ref{fig:terminal}.
n@1118 140
n@1118 141 \begin{figure}[htbp]
n@1118 142 \begin{center}
n@1118 143 \includegraphics[width=.75\textwidth]{pythonServer.png}
n@1118 144 \caption{Mac OS X: The Terminal window after going to the right folder (\texttt{cd [folder\_path]}) and running \texttt{pythonServer.py}.}
n@1118 145 \label{fig:terminal}
n@1118 146 \end{center}
n@1118 147 \end{figure}
n@1118 148
n@1118 149 Alternatively, you can simply type \texttt{python} (followed by a space) and drag the file into the Terminal window from Finder. % DOESN'T WORK YET
n@1118 150
n@1118 151 You can leave this running throughout the different experiments (i.e. leave the Terminal open). Once running, the terminal will report the current URL to type into your browser to initiate the test; usually this is \url{http://localhost:8000/}.
n@1118 152
n@1118 153 To start the test, open the browser and type
n@1118 154
n@1118 155 \texttt{localhost:8000}
n@1118 156
n@1118 157 and hit enter. The test should start (see Figure \ref{fig:test}).
n@1118 158
n@1118 159 To quit the server, either close the terminal window or press Ctrl+C on your keyboard to forcibly shut the server.
n@1118 160
n@1118 161 \subsubsection{Windows}
n@1118 162
n@1118 163 On Windows, Python 2.7 is not generally preinstalled and therefore has to be downloaded\footnote{\url{https://www.python.org/downloads/windows/}} and installed to be able to run scripts such as the local webserver, necessary if the tool is hosted locally.
n@1118 164
n@1118 165 Simply double click the Python script \texttt{pythonServer.py} in the folder you downloaded.
n@1118 166
n@1118 167 You may see a warning like the one in Figure \ref{fig:warning}. Click `Allow access'.
n@1118 168
n@1118 169 \begin{figure}[htbp]
n@1118 170 \begin{center}
n@1118 171 \includegraphics[width=.6\textwidth]{warning.png}
n@1118 172 \caption{Windows: Potential warning message when executing \texttt{pythonServer.py}.}
n@1118 173 \label{fig:warning}
n@1118 174 \end{center}
n@1118 175 \end{figure}
n@1118 176
n@1118 177 The process should now start, in the Command prompt that opens - see Figure \ref{fig:python}.
n@1118 178
n@1118 179 \begin{figure}[htbp]
n@1118 180 \begin{center}
n@1118 181 \includegraphics[width=.75\textwidth]{python.png}
n@1118 182 \caption{Windows: The Command Prompt after running \texttt{pythonServer.py} and opening the corresponding website.}
n@1118 183 \label{fig:python}
n@1118 184 \end{center}
n@1118 185 \end{figure}
n@1118 186
n@1118 187 You can leave this running throughout the different experiments (i.e. leave the Command Prompt open).
n@1118 188
n@1118 189 To start the test, open the browser and type
n@1118 190
n@1118 191 \texttt{localhost:8000}
n@1118 192
n@1118 193 and hit enter. The test should start (see Figure \ref{fig:test}).
n@1118 194
n@1118 195 \begin{figure}[htb]
n@1118 196 \begin{center}
n@1118 197 \includegraphics[width=.8\textwidth]{test.png}
n@1118 198 \caption{The start of the test in Google Chrome on Windows 7.}
n@1118 199 \label{fig:test}
n@1118 200 \end{center}
n@1118 201 \end{figure}
n@1118 202
n@1118 203 If at any point in the test the participant reports weird behaviour or an error of some kind, or the test needs to be interrupted, please notify the experimenter and/or refer to Section \ref{sec:troubleshooting}.
n@1118 204
n@1118 205 When the test is over (the subject should see a message to that effect, and click `Submit' one last time), the output XML file containing all collected data should have appeared in `saves/'. The names of these files are `test-0.xml', `test-1.xml', etc., in ascending order. The Terminal or Command prompt running the local web server will display the following file name. If such a file did not appear, please again refer to Section \ref{sec:troubleshooting}.
n@1118 206
n@1118 207 It is advised that you back up these results as often as possible, as a loss of this data means that the time and effort spent by the subject(s) has been in vain. Save the results to an external or network drive, and/or send them to the experimenter regularly.
n@1118 208
n@1118 209 To start the test again for a new participant, you do not need to close the browser or shut down the Terminal or Command Prompt. Simply refresh the page or go to \texttt{localhost:8000} again.
n@1118 210
n@1118 211
n@1118 212 \subsection{Remote test}
n@1118 213 Put all files on a web server which supports PHP. This allows the `save.php' script to store the XML result files in the `saves/' folder. If the web server is not able to store the XML file there at the end of the test, it will present the XML file locally to the user, as a `Save file' link.
n@1118 214
n@1118 215 Make sure the \texttt{projectReturn} attribute of the \texttt{setup} node is set to the \texttt{save.php} script.
n@1118 216
n@1118 217 Then, just go to the URL of the corresponding HTML file, e.g. \texttt{http://server.com/path/to/WAET/index.html?url=test/my-test.xml}. If storing on the server doesn't work at submission (e.g. if the \texttt{projectReturn} attribute isn't properly set), the result XML file will be presented to the subject on the client side, as a `Save file' link.
n@1118 218
n@1118 219 \subsection{Multiple test documents}
n@1118 220 By default the index page will load a demo page of tests. To automatically load a test document, you need to append the location in the URL. If your URL is normally \url{http://localhost:8000/index.html} you would append the following: \texttt{?url=/path/to/your/test.xml}. Replace the fields with your actual path, the path is local to the running directory, so if you have your test in the directory \texttt{example\_eval} called \texttt{project.xml} you would append \texttt{?url=/example\_eval/project.xml}.
n@1118 221
n@1118 222 \clearpage
n@1118 223
n@1118 224 \section{Interfaces}
n@1118 225
n@1118 226 The Web Audio Evaluation Tool comes with a number of interface styles, each of which can be customised extensively, either by configuring them differently using the many optional features, or by modifying the JavaScript files.
n@1118 227
n@1118 228 To set the interface style for the whole test, %Nick? change when this is not the case anymore, i.e. when the interface can be set per page
n@1118 229 add \texttt{interface="APE"} to the \texttt{setup} node, where \texttt{"APE"} is one of the interface names below.
n@1118 230
n@1118 231 \subsection{APE}
n@1118 232 The APE interface is based on \cite{ape}, and consists of one or more axes, each corresponding with an attribute to be rated, on which markers are placed. As such, it is a multiple stimulus interface where (for each dimension or attribute) all elements are on one axis so that they can be maximally compared against each other, as opposed to rated individually or with regards to a single reference.
n@1118 233 It also contains an optional text box for each element, to allow for clarification by the subject, tagging, and so on.
n@1118 234
n@1118 235 \subsection{MUSHRA}
n@1118 236 This is a straightforward implementation of \cite{mushra}, especially common for the rating of audio quality, for instance for the evaluation of audio codecs.
n@1118 237
n@1118 238
n@1118 239 \clearpage
n@1118 240
n@1118 241 \section{Features}
n@1118 242
n@1118 243 This section covers the different features implemented in the Web Audio Evaluation Tool, how to use them, and what to know about them.
n@1118 244
n@1118 245 Unless otherwise specified, \emph{each} feature described here is optional, i.e. it can be enabled or disabled and adjusted to some extent.
n@1118 246
n@1118 247 As the example project showcases (nearly) all of these features, please refer to its configuration XML document for a demonstration of how to enable and adjust them.
n@1118 248
n@1118 249 \subsection{Interface layout}
n@1118 250 The \texttt{interface} node (child of \texttt{audioholder}) contains the elements that configure a rating scale: its title, the annotations along the scale, and the prefix for the corresponding comment boxes, as described below.
n@1118 251
n@1118 252 Example:
n@1118 253
n@1118 254 \begin{lstlisting}
n@1118 255 <interface name="quality">
n@1118 256 <title>Audio Quality</title>
n@1118 257 <scale position="10">Poor</scale>
n@1118 258 <scale position="90">Excellent</scale>
n@1118 259 <commentBoxPrefix>Comment on fragment</commentBoxPrefix>
n@1118 260 </interface>
n@1118 261 \end{lstlisting}
n@1118 262
n@1118 263 \subsubsection{Title}
n@1118 264 Specifies the axis title as displayed on the interface.
n@1118 265
n@1118 266 If this tag is absent, the title will default to `Axis \emph{[number]}'. Therefore, if no title is desired, just add the title tag (\texttt{<title/>}) without text.
n@1118 267
n@1118 268 \subsubsection{Annotation}
n@1118 269 Words or numbers can be placed on specific positions of the scale with the \texttt{scale} tag. The \texttt{position} attribute is a value from 0 to 100, corresponding to the percentage of the width/height of the scale where you want the string to be placed.
n@1118 270
n@1118 271 \subsubsection{Comment box prefix}
n@1118 272 If comment boxes corresponding with the fragments are enabled, this sets the comment box string after which the fragment number is appended.
n@1118 273
n@1118 274 The default value is ``Comment on fragment''. So in this case, each comment box would have a header ``Comment on fragment \emph{[number]}''.
n@1118 275
n@1118 276 \subsubsection{Multiple scales}
n@1118 277 In the case of multiple rating scales, e.g. when the stimuli are to be rated in terms of attributes `timbre' and `spatial impression', multiple interface nodes will have to be added, each specifying the title and annotations.
n@1118 278
n@1118 279 This is where the \texttt{interface}'s \texttt{name} attribute is particularly important: use this to retrieve the rating values, comments and metrics associated with the specified interface.
n@1118 280 If none is given, you can still use the automatically given \texttt{interface-id}, which is the interface number starting with 0 and corresponding to the order in which the rating scales appear.
n@1118 281
n@1118 282
n@1118 283 \subsection{Surveys}
n@1118 284 Surveys are conducted through an in-page popup window which can collect data using various HTML functions, see Survey elements below for a list. Survey questions are placed into the \texttt{<pretest>} or \texttt{<posttest>} nodes. Appending these nodes to the \texttt{<setup>} node will have the survey options appear before any test pages (if in the \texttt{<pretest>} node) or after all test pages (if in the \texttt{<posttest>} node). Placing the survey options in the \texttt{<audioholder>} node will have them appear before or after the test page they are a child of.
n@1118 285 \subsubsection{Survey elements}
n@1118 286 All survey elements (which `pop up' in the centre of the browser) have an \texttt{id} attribute, for retrieval of the responses in post-processing of the results, and a \texttt{mandatory} attribute, which if set to ``true'' requires the subjects to respond before they can continue.
n@1118 287
n@1118 288 \begin{description}
n@1118 289 \item[statement] Simply shows text to the subject until `Next' or `Start' is clicked.
n@1118 290 \item[question] Expects a text answer (in a text box). Has the \texttt{boxsize} argument: set to ``large'' or ``huge'' for a bigger box size, or ``small'' for small.
n@1118 291 \item[number] Only accepts a numerical value. The attribute \texttt{min="0"} specifies the minimum value---in this case the answer must be non-negative before the subject can continue.
n@1118 292 \item[radio] Radio buttons. Presents a list of options to the user using radio buttons, where only one option from the list can be selected.
n@1118 293 \item[checkbox] Checkboxes. Note that when making a checkbox question ``mandatory'', the subject is forced to select at least one option (which could be e.g. `Other' or `None').\\
n@1118 294 \end{description}
n@1118 295
n@1118 296 \textbf{Example usage:}\\
n@1118 297
n@1118 298 \lstset{
n@1118 299 basicstyle=\ttfamily,
n@1118 300 columns=fullflexible,
n@1118 301 showstringspaces=false,
n@1118 302 commentstyle=\color{grey}\upshape
n@1118 303 }
n@1118 304
n@1118 305 \lstdefinelanguage{XML}
n@1118 306 {
n@1118 307 morestring=[b]",
n@1118 308 morestring=[s]{>}{<},
n@1118 309 morecomment=[s]{<?}{?>},
n@1118 310 stringstyle=\color{black} \bfseries,
n@1118 311 identifierstyle=\color{darkblue} \bfseries,
n@1118 312 keywordstyle=\color{cyan} \bfseries,
n@1118 313 morekeywords={xmlns,version,type},
n@1118 314 breaklines=true% list your attributes here
n@1118 315 }
n@1118 316 \scriptsize
n@1118 317 \lstset{language=XML}
n@1118 318
n@1118 319 \begin{lstlisting}
n@1118 320 <PostTest>
n@1118 321 <question id="location" mandatory="true" boxsize="large">Please enter your location. (example mandatory text question)</question>
n@1118 322 <number id="age" min="0">Please enter your age (example non-mandatory number question)</number>
n@1118 323 <radio id="rating">
n@1118 324 <statement>Please rate this interface (example radio button question)</statement>
n@1118 325 <option name="bad">Bad</option>
n@1118 326 <option name="poor">Poor</option>
n@1118 327 <option name="good">Good</option>
n@1118 328 <option name="great">Great</option>
n@1118 329 </radio>
n@1118 330 <checkbox id="background" mandatory="true">
n@1118 331 <statement>Please select with which activities you have any experience (example checkbox question)</statement>
n@1118 332 <option name="musician">Playing a musical instrument</option>
n@1118 333 <option name="soundengineer">Recording or mixing audio</option>
n@1118 334 </checkbox>
n@1118 335 <statement>Thank you for taking this listening test. Please click 'Submit' and your results will appear in the 'saves/' folder.</statement>
n@1118 336 </PostTest>
n@1118 337 \end{lstlisting}
n@1118 338
n@1118 339
n@1118 340
n@1118 341 \subsection{Randomisation}
n@1118 342 [WORK IN PROGRESS]
n@1118 343
n@1118 344 \subsubsection{Randomisation of configuration XML files}
n@1118 345 The python server has a special function to automatically cycle through a list of test pages. Instead of directly requesting an XML, simply setting the url item in the browser URL to \texttt{pseudo.xml} will cycle through a list of XMLs. These XMLs must be in the local directory called \texttt{pseudo}.
n@1118 346 % how to
n@1118 347 % explain how this is implemented in the pythonServer
n@1118 348 %Nick? already implemented in the PHP?
n@1118 349 % Needs to be implemented in PHP and automated better, will complete soon
n@1118 350
n@1118 351
n@1118 352 \subsubsection{Randomisation of page order}
n@1118 353 The page order randomisation is set by the \texttt{<setup>} node attribute \texttt{randomise-order}, for example \texttt{<setup ... randomise-order="true">...</setup>} will randomise the test page order. When not set, the default is to \textbf{not} randomise the test page order.
n@1118 354
n@1118 355 \subsubsection{Randomisation of axis order}
n@1118 356
n@1118 357 \subsubsection{Randomisation of fragment order}
n@1118 358 The audio fragment randomisation is set by the \texttt{<audioholder>} node attribute \texttt{randomise-order}, for example \texttt{<audioholder ... randomise-order="true">...</audioholder>} will randomise the order of the fragments on that test page. When not set, the default is to \textbf{not} randomise the fragment order.
n@1118 359
n@1118 360 \subsubsection{Randomisation of initial slider position}
n@1118 361 By default slider values are randomised on start. The MUSHRA interface supports setting the initial values of all sliders through the \texttt{<audioholder>} attribute \texttt{initial-position}. This takes an integer between 0 and 100 to signify the slider position.
n@1118 362 % /subsubsection{Randomisation of survey question order}
n@1118 363 % should be an attribute of the individual 'pretest' and 'posttest' elements
n@1118 364 % uncomment once we have it
n@1118 365
n@1118 366 \subsection{Looping}
n@1118 367 Looping enables the fragments to loop until stopped by the user. Looping is synchronous between samples so all samples start at the same time.
n@1118 368 Individual test pages can have their playback looped by the \texttt{<audioholder>} attribute \texttt{loop} with a value of "true" or "false".
n@1118 369 If the fragments are not of equal length initially, they are padded with zeros so that they are equal length, to enable looping without the fragments going out of sync relative to each other.
n@1118 370
n@1118 371 Note that fragments cannot be played until all page fragments are loaded when in looped mode, as the engine needs to know the amount to pad the fragments.
n@1118 372
n@1118 373 \subsection{Sample rate}
n@1118 374 If you require the test to be conducted at a certain sample rate (i.e. you do not tolerate resampling of the elements to correspond with the system's sample rate), add \texttt{sampleRate="96000"} - where ``96000'' can be any supported sample rate - so that a warning message is shown alerting the subject that the system's sample rate is different from this enforced sample rate. This of course means that in one test, all sample rates must be equal as it is impossible to change the system's sample rate during the test (even if you were to manually change it, then the browser must be restarted for it to take effect).
n@1118 375
n@1118 376 \subsection{Scrubber bar}
n@1118 377 The scrubber bar, or transport bar (that is, the visualisation of the playhead, with an indication of time and showing the portion of the file played so far), is at this point just a visual, and not a controller to adjust the playhead position.
n@1118 378
n@1118 379 Make visible by adding \texttt{<option name='playhead'/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks).
n@1118 380
n@1118 381 \subsection{Metrics}
n@1118 382 Enable the collection of metrics by adding \texttt{collectMetrics=`true'} in the \texttt{setup} node. % Should this always be on??
n@1118 383
n@1118 384 The \texttt{Metric} node, which contains the metrics to be tracked during the complete test, is a child of the \texttt{setup} node, and it could look as follows.
n@1118 385
n@1118 386 \begin{lstlisting}
n@1118 387 <Metric>
n@1118 388 <metricEnable>testTimer</metricEnable>
n@1118 389 <metricEnable>elementTimer</metricEnable>
n@1118 390 <metricEnable>elementInitialPosition</metricEnable>
n@1118 391 <metricEnable>elementTracker</metricEnable>
n@1118 392 <metricEnable>elementFlagListenedTo</metricEnable>
n@1118 393 <metricEnable>elementFlagMoved</metricEnable>
n@1118 394 <metricEnable>elementListenTracker</metricEnable>
n@1118 395 </Metric>
n@1118 396 \end{lstlisting}
n@1118 397
n@1118 398 When in doubt, err on the inclusive side, as one never knows which information is needed in the future. Most of these metrics are necessary for post-processing scripts such as timeline\_view\_movement.py.
n@1118 399
n@1118 400 \subsubsection{Time test duration}
n@1118 401 \texttt{testTimer}\\
n@1118 402 One per test page. Presents the total test time from the first playback on the test page to the submission of the test page (excluding test time of the pre-/post- test surveys). This is presented in the results as \texttt{<metricresult id="testTime"> 8.60299319727892 </metricresult>}. The time is in seconds.
n@1118 403
n@1118 404 \subsubsection{Time fragment playback}
n@1118 405 \texttt{elementTimer}\\
n@1118 406 One per audio fragment per test page. This totals up the entire time the audio fragment has been listened to in this test and is presented as \texttt{<metricresult name="enableElementTimer"> 1.0042630385487428 </metricresult>}. The time is in seconds.
n@1118 407
n@1118 408 \subsubsection{Initial positions}
n@1118 409 \texttt{elementInitialPosition}\\
n@1118 410 One per audio fragment per test page. Tracks the initial position of the sliders, especially relevant when these are randomised. Example result \texttt{<metricresult name="elementInitialPosition"> 0.8395522388059702 </metricresult>}.
n@1118 411
n@1118 412 \subsubsection{Track movements}
n@1118 413 \texttt{elementTracker}\\
n@1118 414 One per audio fragment per test page. Tracks the movement of each interface object. Each movement event has the time it occurred at and the new value.
n@1118 415 \subsubsection{Which fragments listened to}
n@1118 416 \texttt{elementFlagListenedTo}\\
n@1118 417 One per audio fragment per test page. Boolean response, set to true if listened to.
n@1118 418 \subsubsection{Which fragments moved}
n@1118 419 \texttt{elementFlagMoved}\\
n@1118 420 One per audio fragment per test page. Binary check whether or not the marker corresponding with a particular fragment was moved at all throughout the experiment.
n@1118 421
n@1118 422 \subsubsection{elementListenTracker}
n@1118 423 \texttt{elementListenTracker}\\
n@1118 424 One per audio fragment per test page. Tracks the playback events of each audio element pairing both the time in the test when playback started and when it stopped, it also gives the buffertime positions.
n@1118 425
n@1118 426 \subsection{References and anchors}
n@1118 427 The audio elements, \texttt{<audioelement>} have the attribute \texttt{type}, which defaults to normal. Setting this to one of the following will have the following effects.
n@1118 428 \subsubsection{Outside Reference}
n@1118 429 Set type to 'outside-reference'. This will place the object in a separate playback element clearly labelled as an outside reference. This is exempt from any movement checks but will still be included in any listening checks.
n@1118 430 \subsubsection{Hidden reference}
n@1118 431 Set type to 'reference'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the 'reference' type in the results to quickly find it. The reference can be forced to be above a value before completing the test page by setting the attribute 'marker' to be a value between 0 and 100 representing the integer value position it must be equal to or above.
n@1118 432 \subsubsection{Hidden anchor}
n@1118 433 Set type to 'anchor'. The element will still be randomised as normal (if selected) and presented to the user. However the element will have the 'anchor' type in the results to quickly find it. The anchor can be forced to be below a value before completing the test page by setting the attribute 'marker' to be a value between 0 and 100 representing the integer value position it must be equal to or below.
n@1118 434
n@1118 435 \subsection{Checks}
n@1118 436 \label{sec:checks}
n@1118 437
n@1118 438 %blabla
n@1118 439 These checks are enabled in the \texttt{interface} node, which is a child of the \texttt{setup} node.
n@1118 440 \subsubsection{Playback checks}
n@1118 441 % what it does/is
n@1118 442 Enforce playing each sample at least once, for at least a little bit (e.g. this test is satisfied even if you only play a tiny portion of the file), by alerting the user to which samples have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before playing each sample at least once.
n@1118 443 % how to enable/disable
n@1118 444
n@1118 445 Alternatively, one can check whether the \emph{entire} fragment was listened to at least once.
n@1118 446 % how to enable
n@1118 447
n@1118 448 Add \texttt{<check name="fragmentPlayed"/>} to the \texttt{interface} node.
n@1118 449
n@1118 450
n@1118 451 \subsubsection{Movement check}
n@1118 452 Enforce moving each sample at least once, for at least a little bit (e.g. this test is satisfied even if you only move the marker a tiny amount), by alerting the user to which samples have not been moved upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before moving each sample at least once.
n@1118 453 If there are several axes, the warning will specify which samples have to be moved on which axis.
n@1118 454
n@1118 455 Add \texttt{<check name="fragmentMoved"/>} to the \texttt{interface} node.
n@1118 456
n@1118 457 \subsubsection{Comment check}
n@1118 458 % How to enable/disable?
n@1118 459
n@1118 460 Enforce commenting, by alerting the user to which samples have not been commented on upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test, before putting at least one character in each comment box.
n@1118 461
n@1118 462 Note that this does not apply to any extra (text, radio button, checkbox) elements, unless these have the `mandatory' option enabled. %Nick? is this extra 'mandatory' option implemented?
n@1118 463
n@1118 464 Add \texttt{<check name="fragmentComments"/>} to the \texttt{interface} node.
n@1118 465
n@1118 466 %ADD: how to add a custom comment box
n@1118 467
n@1118 468 \subsubsection{Scale use check}
n@1118 469 It is possible to enforce a certain usage of the scale, meaning that at least one slider needs to be below and/or above a certain percentage of the slider.
n@1118 470
n@1118 471 Add \texttt{<check name="scalerange" min="25" max="75"/>} to the \texttt{interface} node.
n@1118 472
n@1118 473 \subsubsection{Note on the use of multiple rating axes}
n@1118 474 I.e. what if more than one axis? How to specify which axis the checks relate to? %Nick? to add?
n@1118 475
n@1118 476 \subsection{Platform information}
n@1118 477 % what does it do, what does it look like
n@1118 478 % limitations?
n@1118 479 For troubleshooting and usage statistics purposes, information about the browser and the operating system is logged in the results XML file. This is especially useful in the case of remote tests, when it is not certain which operating system and/or browser version were used. Note that this information is not always available and/or accurate, e.g. when the subject has taken steps to be more anonymous, so it should be treated as a guide only.
n@1118 480
n@1118 481 Example:
n@1118 482 \begin{lstlisting}
n@1118 483 <navigator>
n@1118 484 <platform>MacIntel</platform>
n@1118 485 <vendor>Google Inc.</vendor>
n@1118 486 <uagent>Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36</uagent>
n@1118 487 </navigator>
n@1118 488 \end{lstlisting}
n@1118 489
n@1118 490 \subsection{Show progress}
n@1118 491 Add \texttt{<option name="page-count"/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks) to add the current page number and the total number of pages to the interface.
n@1118 492
n@1118 493 \subsection{Gain}
n@1118 494 It is possible to set the gain (in decibel) applied to the different audioelements, as an attribute of the \texttt{audioelement} nodes in the configuration XML file:
n@1118 495
n@1118 496 \texttt{<audioElements url="sample-01.wav" gain="-6" id="sample01quieter" />}\\
n@1118 497 Please note that there are no checks on this value to detect whether it was accidentally entered as a linear gain rather than in decibel.
n@1118 498
n@1118 499 \subsection{Loudness}
n@1118 500 % automatic loudness equalisation
n@1118 501 % guide to loudness.js
n@1118 502 Each audio fragment on loading has its loudness calculated. The tool uses the EBU R 128 recommendation following the ITU-R BS.1770-4 loudness calculations to return the integrated LUFS loudness. The attribute \texttt{loudness} will set the loudness from the scope it is applied in. Applying it in the \texttt{<setup>} node will set the loudness for all test pages. Applying it in the \texttt{<audioholder>} node will set the loudness for that page. Applying it in the \texttt{<audioelement>} node will set the loudness for that fragment. The scope is set locally, so if there is a loudness on both the \texttt{<audioholder>} and \texttt{<setup>} nodes, that test page will take the value associated with the \texttt{<audioholder>}. The loudness attribute is set in LUFS.
n@1118 503
n@1118 504 \clearpage
n@1118 505
n@1118 506
n@1118 507 \section{Using the test create tool}
n@1118 508 We provide a test creation tool, available in the directory test\_create. This tool is a self-contained web page, so double-clicking will launch the page in your system default browser.
n@1118 509
n@1118 510 The test creation tool can help you build a simple test very quickly. By simply selecting your interface and clicking check-boxes you can build a test in minutes.
n@1118 511
n@1118 512 Include audio by dragging and dropping the stimuli you wish to include.
n@1118 513
n@1118 514 The tool examines your XML before exporting to ensure you do not export an invalid XML structure which would crash the test.
n@1118 515
n@1118 516 This guide will help you to construct your own interface on top of the WAET (Web Audio Evaluation Tool) engine. The WAET engine resides in the core.js file, this contains prototype objects to handle most of the test creation, operation and data collection. The interface simply has to link into this at the correct points.
n@1118 517
n@1118 518 \section{Building your own interface}
n@1118 519
n@1118 520 \subsection{Nodes to familiarise}
n@1118 521 Core.js handles several very important nodes which you should become familiar with. The first is the Audio Engine, initialised and stored in variable `AudioEngineContext'. This handles the playback of the web audio nodes as well as storing the `AudioObjects'. The `AudioObjects' are custom nodes which hold the audio fragments for playback. These nodes also have a link to two interface objects, the comment box if enabled and the interface providing the ranking. On creation of an `AudioObject' the interface link will be nulled, it is up to the interface to link these correctly.
n@1118 522
n@1118 523 The specification document will be decoded and parsed into an object called `specification'. This will hold all of the specifications various nodes. The test pages and any pre/post test objects are processed by a test state which will proceed through the test when called to by the interface. Any checks (such as playback or movement checks) are to be completed by the interface before instructing the test state to proceed. The test state will call the interface on each page load with the page specification node.
n@1118 524
n@1118 525 \subsection{Modifying \texttt{core.js}}
n@1118 526 Whilst there is very little code actually needed, you do need to instruct core.js to load your interface file when called for from a specification node. There is a function called `loadProjectSpecCallback' which handles the decoding of the specification and setting any external items (such as metric collection). At the very end of this function there is an if statement, add to this list with your interface string to link to the source. There is an example in there for both the APE and MUSHRA tests already included. Note: Any updates to core.js in future work will most likely overwrite your changes to this file, so remember to check your interface is still here after any update that interferes with core.js.
n@1118 527 Any further files can be loaded here as well, such as css styling files. jQuery is already included.
n@1118 528
n@1118 529 \subsection{Building the Interface}
n@1118 530 Your interface file will get loaded automatically when the `interface' attribute of the setup node matches the string in the `loadProjectSpecCallback' function. The following functions must be defined in your interface file.
n@1118 531 \begin{itemize}
n@1118 532 \item \texttt{loadInterface} - Called once when the document is parsed. This creates any necessary bindings, such as to the metric collection classes and any check commands. Here you can also start the structure for your test such as placing in any common nodes (such as the title and empty divs to drop content into later).
n@1118 533 \item \texttt{loadTest(audioHolderObject)} - Called for each page load. The audioHolderObject contains a specification node holding effectively one of the audioHolder nodes.
n@1118 534 \item \texttt{resizeWindow(event)} - Handle for any window resizing. Simply scale your interface accordingly. This function must be here, but can be an empty function call.
n@1118 535 \end{itemize}
n@1118 536
n@1118 537 \subsubsection{loadInterface}
n@1118 538 This function is called by the interface once the document has been parsed since some browsers may parse files asynchronously. The best method is simply to put `loadInterface()' at the top of your interface file, therefore when the JavaScript engine is ready the function is called.
n@1118 539
n@1118 540 By default the HTML file has an element with id ``topLevelBody'' where you can build your interface. Make sure you blank the contents of that object. This function is the perfect time to build any fixed items, such as the page title, session titles, interface buttons (Start, Stop, Submit) and any holding and structure elements for later on.
n@1118 541
n@1118 542 At the end of the function, insert these two function calls: testState.initialise() and testState.advanceState();. This will actually begin the test sequence, including the pre-test options (if any are included in the specification document).
n@1118 543
n@1118 544 \subsubsection{loadTest(audioHolderObject)}
n@1118 545 This function is called on each new test page. It is this function's job to clear out the previous test and set up the new page. Use the function audioEngineContext.newTestPage(); to instruct the audio engine to prepare for a new page. ``audioEngineContext.audioObjects = [];'' will delete any audioObjects, interfaceContext.deleteCommentBoxes(); will delete any comment boxes and interfaceContext.deleteCommentQuestions(); will delete any extra comment boxes specified by commentQuestion nodes.
n@1118 546
n@1118 547 This function will need to instruct the audio engine to build each fragment. Just passing the constructor each element from the audioHolderObject will build the track, audioEngineContext.newTrack(element) (where element is the audioHolderObject audio element). This will return a reference to the constructed audioObject. Decoding of the audio will happen asynchronously.
n@1118 548
n@1118 549 You also need to link audioObject.interfaceDOM with your interface object for that audioObject. The interfaceDOM object has a few default methods. Firstly it must start disabled and become enabled once the audioObject has decoded the audio (function call: enable()). Next it must have a function exportXMLDOM(), this will return the xml node for your interface, however the default is for it to return a value node, with textContent equal to the normalised value. You can perform other functions, but our scripts may not work if something different is specified (as it will breach our results specifications). Finally it must also have a method getValue, which returns the normalised value.
n@1118 550
n@1118 551 It is also the job of the interfaceDOM to call any metric collection functions necessary, however some functions may be better placed outside (for example, the APE interface uses drag and drop, therefore the best way was to call the metric functions from the dragEnd function, which is called when the interface object is dropped). Metrics based upon listening are handled by the audioObject. The interfaceDOM object must manage any movement metrics. For a list of valid metrics and their behaviours, look at the project specification document included in the repository/docs location. The same goes for any checks required when pressing the submit button, or any other method to proceed the test state.
n@1118 552
n@1118 553 \clearpage
n@1118 554 \section{Analysis and diagnostics}
n@1118 555 \subsection{In the browser}
n@1118 556 See `analysis.html' in the main folder: immediate visualisation of (by default) all results in the `saves/' folder.
n@1118 557
n@1118 558 \subsection{Python scripts}
n@1118 559 The package includes Python (2.7) scripts (in `scripts/') to extract ratings and comments, generate visualisations of ratings and timelines, and produce a fully fledged report.
n@1118 560
n@1118 561 Visualisation requires the free matplotlib toolbox (http://matplotlib.org), numpy and scipy.
n@1118 562 By default, the scripts can be run from the `scripts' folder, with the result files in the `saves' folder (the default location where result XMLs are stored). Each script takes the XML file folder as an argument, along with other arguments in some cases.
n@1118 563 Note: to avoid all kinds of problems, please avoid using spaces in file and folder names (this may work on some systems, but others don't like it).
n@1118 564
n@1118 565 \subsubsection{comment\_parser.py}
n@1118 566 Extracts comments from the output XML files corresponding with the different subjects found in `saves/'. It creates a folder per `audioholder'/page it finds, and stores a CSV file with comments for every `audioelement'/fragment within these respective `audioholders'/pages. In this CSV file, every line corresponds with a subject/output XML file. Depending on the settings, the first column containing the name of the corresponding XML file can be omitted (for anonymisation).
n@1118 567 Beware of Excel: sometimes the UTF-8 is not properly imported, leading to problems with special characters in the comments (particularly cumbersome for foreign languages).
n@1118 568
n@1118 569 \subsubsection{evaluation\_stats.py}
n@1118 570 Shows a few statistics of tests in the `saves/' folder so far, mainly for checking for errors. Shows the number of files that are there, the audioholder IDs that were tested (and how many of each separate ID), the duration of each page, the duration of each complete test, the average duration per page, and the average duration in function of the page number.
n@1118 571
n@1118 572 \subsubsection{generate\_report.py}
n@1118 573 Similar to `evaluation\_stats.py', but generates a PDF report based on the output files in the `saves/' folder - or any folder specified as command line argument. Uses pdflatex to write a LaTeX document, then convert to a PDF.
n@1118 574
n@1118 575 \subsubsection{score\_parser.py}
n@1118 576 Extracts rating values from the XML to CSV - necessary for running visualisation of ratings. Creates the folder `saves/ratings/' if not yet created, to which it writes a separate file for every `audioholder'/page in any of the output XMLs it finds in `saves/'. Within each file, rows represent different subjects (output XML file names) and columns represent different `audioelements'/fragments.
n@1118 577
n@1118 578 \subsubsection{score\_plot.py}
n@1118 579 Plots the ratings as stored in the CSVs created by score\_parser.py
n@1118 580 Depending on the settings, it displays and/or saves (in `saves/ratings/') a boxplot, confidence interval plot, scatter plot, or a combination of the aforementioned.
n@1118 581 Requires the free matplotlib library.
n@1118 582 At this point, more than one subject is needed for this script to work.
n@1118 583
n@1118 584 \subsubsection{timeline\_view\_movement.py}
n@1118 585 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows the marker movements of the different fragments, along with when each fragment was played (red regions). Automatically takes fragment names, rating axis title, rating axis labels, and audioholder name from the XML file (if available).
n@1118 586
n@1118 587 \subsubsection{timeline\_view.py} % should be omitted or absorbed by the above soon
n@1118 588 Creates a timeline for every subject, for every `audioholder'/page, corresponding with any of the output XML files found in `saves/'. It shows when and for how long the subject listened to each of the fragments.
n@1118 589
n@1118 590
n@1118 591
n@1118 592 \clearpage
n@1118 593 \section{Troubleshooting} \label{sec:troubleshooting}
n@1118 594 \subsection{Reporting bugs and requesting features}
n@1118 595 Thanks to feedback from using the interface in experiments by the authors and others, many bugs have been caught and fatal crashes due to the interface seem to be a thing of the past entirely.
n@1118 596
n@1118 597 We continually develop this tool to fix issues and implement features useful to us or our user base. See \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/issues} for a list of feature requests and bug reports, and their status.
n@1118 598
n@1118 599 Please contact the authors if you experience any bugs, if you would like additional functionality, if you spot any errors or gaps in the documentation, if you have questions about using the interface, or if you would like to give any feedback (even positive!) about the interface. We look forward to learning how the tool has (not) been useful to you.
n@1118 600
n@1118 601
n@1118 602 \subsection{First aid}
n@1118 603 Meanwhile, if things do go wrong or the test needs to be interrupted for whatever reason, all data is not lost. In a normal scenario, the test needs to be completed until the end (the final `Submit'), at which point the output XML is stored in the \texttt{saves/}. If this stage is not reached, open the JavaScript Console (see below for how to find it) and type
n@1118 604
n@1118 605 \texttt{createProjectSave()}
n@1118 606
n@1118 607 to present the result XML file on the client side, or
n@1118 608
n@1118 609 \texttt{createProjectSave(specification.projectReturn)}
n@1118 610
n@1118 611 to try to store it to the specified location, e.g. the `saves/' folder on the web server or the local machine (on failure the result XML should be presented directly in the web browser instead)
n@1118 612
n@1118 613 and hit enter. This will open a pop-up window with a hyperlink that reads `Save File'; click it and an XML file with results until that point should be stored in your download folder.
n@1118 614
n@1118 615 Alternatively, a lot of data can be read from the same console, in which the tool prints a lot of debug information. Specifically:
n@1118 616 \begin{itemize}
n@1118 617 \item the randomisation of pages and fragments are logged;
n@1118 618 \item any time a slider is played, its ID and the time stamp (in seconds since the start of the test) are displayed;
n@1118 619 \item any time a slider is dragged and dropped, the location where it is dropped including the time stamp are shown;
n@1118 620 \item any comments and pre- or post-test questions and their answers are logged as well.
n@1118 621 \end{itemize}
n@1118 622
n@1118 623 You can select all this and save into a text file, so that none of this data is lost. You may choose to do this even when a test was successful, as an extra precaution.
n@1118 624
n@1118 625 If you encounter any issue which you believe to be caused by any aspect of the tool, and/or which the documentation does not mention, please do let us know!
n@1118 626
n@1118 627 \subsubsection*{Opening the JavaScript Console}
n@1118 628 \begin{itemize}
n@1118 629 \item In Google Chrome, the JavaScript Console can be found in \textbf{View$>$Developer$>$JavaScript Console}, or via the keyboard shortcut Cmd + Alt + J (Mac OS X).
n@1118 630 \item In Safari, the JavaScript Console can be found in \textbf{Develop$>$Show Error Console}, or via the keyboard shortcut Cmd + Alt + C (Mac OS X). Note that for the Developer menu to be visible, you have to go to Preferences (Cmd + ,) and enable `Show Develop menu in menu bar' in the `Advanced' tab. \textbf{Note that as long as the Developer menu is not visible, nothing is logged to the console, i.e. you will only be able to see diagnostic information from when you switched on the Developer tools onwards.}
n@1118 631 \item In Firefox, go to \textbf{Tools$>$Web Developer$>$Web Console}, or hit Cmd + Alt + K.
n@1118 632 \end{itemize}
n@1118 633
n@1118 634 \subsection{Known issues and limitations}
n@1118 635 \label{sec:knownissues}
n@1118 636
n@1118 637 The following is a non-exhaustive list of problems and limitations you may experience using this tool, due to not being supported yet by us, or by the Web Audio API and/or (some) browsers.
n@1118 638
n@1118 639 \begin{itemize}
n@1118 640 \item Issue \href{https://code.soundsoftware.ac.uk/issues/1463}{\textbf{\#1463}}: \textbf{Firefox} only supports 8 bit and 16 bit WAV files. Pending automatic requantisation (which deteriorates the audio signal's dynamic range to some extent), WAV format stimuli need to adhere to these limitations in order for the test to be compatible with Firefox.
n@1118 641 \item Issues \href{https://code.soundsoftware.ac.uk/issues/1474}{\textbf{\#1474}} and \href{https://code.soundsoftware.ac.uk/issues/1462}{\textbf{\#1462}}: On occasions, audio is not working - or only a continuous `beep' can be heard - notably in \textbf{Safari}. Refreshing, quitting the browser and even enabling Developer tools in Safari's Preferences pane (`Advanced' tab: ``Show `Develop' menu in menu bar'') has helped resolve this. If no (high quality) audio can be heard, make sure your entire playback system's settings are all correct.
n@1118 642 \end{itemize}
n@1118 643
n@1118 644 \clearpage
n@1118 645 \bibliographystyle{ieeetr}
n@1118 646 \bibliography{Instructions}{}
n@1118 647
n@1118 648
n@1118 649 \clearpage
n@1118 650 \appendix
n@1118 651
n@1118 652 \section{Legacy}
n@1118 653 The APE interface and most of the functionality of the first WAET editions are inspired by the APE toolbox for MATLAB \cite{ape}. See \url{https://code.soundsoftware.ac.uk/projects/ape} for the source code and \url{http://brechtdeman.com/publications/aes136.pdf} for the corresponding paper.
n@1118 654
n@1118 655 \clearpage
n@1118 656
n@1118 657 \section{Listening test instructions example}
n@1118 658
n@1118 659 Before each test, show the instructions below or similar and make sure it is available to the subject throughout the test. Make sure to ask whether the participant has any questions upon seeing and/or reading the instructions.
n@1118 660
n@1118 661 \begin{itemize}
n@1118 662 \item You will be asked for your name (``John Smith'') and location (room identifier).
n@1118 663 \item An interface will appear, where you are asked to
n@1118 664 \begin{itemize}
n@1118 665 \item click green markers to play the different mixes;
n@1118 666 \item drag the markers on a scale to reflect your preference for the mixes;
n@1118 667 \item comment on these mixes, using text boxes with corresponding numbers (in your \textbf{native language});
n@1118 668 \item optionally comment on all mixes together, or on the song, in `General comments'.
n@1118 669 \end{itemize}
n@1118 670 \item You are asked for your personal, honest opinion. Feel free to use the full range of the scale to convey your opinion of the various mixes. Don't be afraid to be harsh and direct.
n@1118 671 \item The markers appear at random positions at first (which means some markers may hide behind others).
n@1118 672 \item The interface can take a few seconds to start playback, but switching between mixes should be instantaneous.
n@1118 673 \item This is a research experiment, so please forgive us if things go wrong. Let us know immediately and we will fix it or restart the test.
n@1118 674 \item When the test is finished (after all songs have been evaluated), just call the experimenter, do NOT close the window.
n@1118 675 \item After the test, please fill out our survey about your background, experience and feedback on the test.
n@1118 676 \item By participating, you consent to us using all collected data for research. Unless asked explicitly, all data will be anonymised when shared.
n@1118 677 \end{itemize}
n@1118 678
n@1118 679 \clearpage
n@1118 680
n@1118 681 \section{Terminology} % just to keep track of what exactly we call things. Don't use terms that are too different, to avoid confusion.
n@1118 682 As a guide to better understand the Instructions, and to expand them later, here is a list of terms that may be unclear or ambiguous unless properly defined.
n@1118 683 \begin{description}
n@1118 684 \item[Subject] The word we use for a participant, user, ... of the test, i.e. not the experimenter who designs the test but the person who evaluates the audio under test as part of an experiment (or the preparation of one).
n@1118 685 \item[User] The person who uses the tool to configure, run and analyse the test - i.e. the experimenter, most likely a researcher - or at least
n@1118 686 \item[Page] A screen in a test; corresponds with an \texttt{audioholder}
n@1118 687 \item[Fragment] An element, stimulus or sample in a test; corresponds with an \texttt{audioelement}
n@1118 688 \item[Test] A complete test which can consist of several pages; corresponds with an entire configuration XML file
n@1118 689 \item[Configuration XML file] The XML file containing the necessary information on interface, samples, survey questions, configurations, ... which the JavaScript modules read to produce the desired test.
n@1118 690 \item[Results XML file] The output of a successful test, including ratings, comments, survey responses, timing information, and the complete configuration XML file with which the test was generated in the first place.
n@1118 691 \end{description}
n@1118 692
n@1118 693 \clearpage
n@1118 694
n@1118 695 \setcounter{secnumdepth}{0} % don't number this last bit
n@1118 696 \section{Contact details} % maybe add web pages, Twitter accounts, whatever you like
n@1118 697 \label{sec:contact}
n@1118 698
n@1118 699 \begin{itemize}
n@1118 700 \item Nicholas Jillings: \texttt{nicholas.jillings@mail.bcu.ac.uk}
n@1118 701 \item Brecht De Man: \texttt{b.deman@qmul.ac.uk}
n@1118 702 \item David Moffat: \texttt{d.j.moffat@qmul.ac.uk}
n@1118 703 \end{itemize}
n@1118 704
n@1118 705 \end{document}