\documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
\usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
\geometry{letterpaper} % ... or a4paper or a5paper or ...
%\geometry{landscape} % Activate for rotated page geometry
\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{graphicx} % Use pdf, png, jpg, or eps with pdflatex; use eps in DVI mode
% TeX will automatically convert eps --> pdf in pdflatex

\usepackage{listings} % Source code
\usepackage{xcolor} % colour (source code for instance)
\definecolor{grey}{rgb}{0.1,0.1,0.1}
\definecolor{darkblue}{rgb}{0.0,0.0,0.6}
\definecolor{cyan}{rgb}{0.0,0.6,0.6}

\usepackage{amssymb}
\usepackage{cite}
\usepackage{hyperref} % Hyperlinks
\usepackage[nottoc,numbib]{tocbibind} % 'References' in TOC

\graphicspath{{img/}} % Relative path where the images are stored.

\title{Instructions for \\ Web Audio Evaluation Tool}
\author{Nicholas Jillings, Brecht De Man and David Moffat}
\date{7 December 2015} % Activate to display a given date or no date

\begin{document}
\maketitle

These instructions cover the use of the Web Audio Evaluation Tool on Windows and Mac OS X platforms.

We request that you acknowledge the authors and cite our work \cite{waet} whenever you use this tool; see also CITING.txt.

The tool is available in its entirety, including source code, at \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/}, under the GNU General Public License v3.0 (\url{http://choosealicense.com/licenses/gpl-3.0/}); see also LICENSE.txt.

% TO DO: Linux (Android, iOS)

\tableofcontents

\clearpage

\section{Installation}
Download the folder (\url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool/archive/tip.zip}) and unzip it in a location of your choice, or pull the source code from \url{https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool} (Mercurial).
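For example, using Mercurial from the command line:

\texttt{hg clone https://code.soundsoftware.ac.uk/hg/webaudioevaluationtool}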

\subsection{Contents}
The folder should contain the following elements: \\

\textbf{Main folder:}
\begin{itemize}
\item \texttt{analyse.html}: analysis and diagnostics of a set of result XML files
\item \texttt{ape.css, core.css, graphics.css, mushra.css, structure.css}: style files (edit to change appearance)
\item \texttt{ape.js}: JavaScript file for APE-style interface \cite{ape}
\item \texttt{CITING.txt, LICENSE.txt, README.txt}: text files with, respectively, the citation which we ask you to include in any work where this tool or any portion thereof is used or modified; the license under which the software is shared; and a general readme file referring to these instructions.
\item \texttt{core.js}: JavaScript file with core functionality
\item \texttt{index.html}: webpage where the interface appears (includes link to the test configuration XML)
\item \texttt{jquery-2.1.4.js}: jQuery JavaScript Library
\item \texttt{loudness.js}: calculates the loudness of Web Audio API Buffer objects and returns gain values to correct for a target loudness or to match loudness between multiple objects
\item \texttt{mushra.js}: JavaScript file for MUSHRA-style interface \cite{mushra}
\item \texttt{pythonServer.py}: webserver for running tests locally
\item \texttt{pythonServer-legacy.py}: webserver with limited functionality (no automatic storing of output XML files)
\item \texttt{save.php}: PHP script to store result XML files on a web server\\
\end{itemize}
\textbf{Documentation (./docs/)}
\begin{itemize}
\item \href{http://c4dm.eecs.qmul.ac.uk/dmrn/events/dmrnp10/#posters}{DMRN+10}: PDF and \LaTeX\ source of poster for the 10\textsuperscript{th} Digital Music Research Network One-Day Workshop (``soft launch'')
\item Instructions: PDF and \LaTeX\ source of these instructions
\item Project Specification Document (\LaTeX/PDF)
\item Results Specification Document (\LaTeX/PDF)
\item SMC15: PDF and \LaTeX\ source of the 12\textsuperscript{th} Sound and Music Computing Conference paper \cite{waet}
\item WAC2016: PDF and \LaTeX\ source of the 2\textsuperscript{nd} Web Audio Conference paper\\
\end{itemize}
\textbf{Example project (./example\_eval/)}
\begin{itemize}
\item An example of what the setup XML should look like, with example audio files 0.wav-10.wav: short recordings at 44.1 kHz, 16 bit, of a woman saying the corresponding number (useful for testing randomisation and general familiarisation with the interface).\\
\end{itemize}
\textbf{Output files (./saves/)}
\begin{itemize}
\item The output XML files of tests will be stored here by default by the \texttt{pythonServer.py} script.\\
\end{itemize}
\textbf{Auxiliary scripts (./scripts/)}
\begin{itemize}
\item Helpful Python scripts for extraction and visualisation of data.\\
\end{itemize}
\textbf{Test creation tool (./test\_create/)}
\begin{itemize}
\item Webpage for easily setting up your own test without having to delve into the XML.\\
\end{itemize}

\subsection{Compatibility}
As Microsoft Internet Explorer doesn't support the Web Audio API\footnote{\url{http://caniuse.com/\#feat=audio-api}}, you will need another browser such as Google Chrome, Safari or Firefox (all three are tested and confirmed to work).

Firefox does not currently support bit depths other than 8 or 16 bit for PCM wave files. In the future, this will throw a warning message to tell the user that their content is being quantised automatically. %Nick? Right? To be removed if and when actually implemented

The tool is platform-independent and works in any browser that supports the Web Audio API. It does not require any specific, proprietary software. However, if the tool is hosted locally (i.e. you are not hosting it on an actual webserver), you will need Python (2.7), a free programming language - see `Local test' below.

\clearpage


\section{Test setup}

\subsection{Sample rate}
Depending on how the experiment is set up, audio is resampled automatically (the Web Audio default) or the sample rate is enforced. In the latter case, you will need to make sure that the sample rate of the system is equal to the sample rate of the audio files. For this reason, all audio files in the experiment have to have the same sample rate.

Always make sure that all other digital equipment in the playback chain (clock, audio interface, digital-to-analog converter, ...) is set to this same sample rate.

Note that upon changing the sample rate, the browser has to be restarted for the change to take effect.

\subsubsection{Mac OS X}
To change the sample rate in Mac OS X, go to \textbf{Applications/Utilities/Audio MIDI Setup} or find this application with Spotlight (see Figure \ref{fig:audiomidisetup}). Then select the output of the audio interface you are using and change the `Format' to the appropriate number. Also make sure the bit depth and channel count are as desired.
If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.

Also make sure left and right channel gains are equal, as some applications alter this without changing it back, leading to a predominantly louder left or right channel. See Figure \ref{fig:audiomidisetup} for an example where the channel gains are different.

\begin{figure}[tb]
\centering
\includegraphics[width=.65\textwidth]{img/audiomidisetup.png}
\caption{The Audio MIDI Setup window in Mac OS X}
\label{fig:audiomidisetup}
\end{figure}

\subsubsection{Windows}
To change the sample rate in Windows, right-click the speaker icon in the lower-right corner of your desktop and choose `Playback devices'. Right-click the appropriate playback device and click `Properties'. Click the `Advanced' tab and verify or change the sample rate under `Default Format'. % NEEDS CONFIRMATION
If you are using an external audio interface, you may have to go to the preference pane of that device to change the sample rate.
\subsection{Local test}
If the test is hosted locally, you will need to run the local webserver provided with this tool.

\subsubsection{Mac OS X}

On Mac OS X, Python comes preinstalled.

Open the Terminal (find it in \textbf{Applications/Terminal} or via Spotlight) and go to the folder you downloaded. To do this, type \texttt{cd [folder]}, where \texttt{[folder]} is the folder containing the \texttt{pythonServer.py} script you downloaded. For instance, if the location is \texttt{/Users/John/Documents/test/}, then type

\texttt{cd /Users/John/Documents/test/}

Then hit enter and run the Python script by typing

\texttt{python pythonServer.py}

and hit enter again. See also Figure \ref{fig:terminal}.

\begin{figure}[htbp]
\begin{center}
\includegraphics[width=.75\textwidth]{pythonServer.png}
\caption{Mac OS X: The Terminal window after going to the right folder (\texttt{cd [folder\_path]}) and running \texttt{pythonServer.py}.}
\label{fig:terminal}
\end{center}
\end{figure}

Alternatively, you can simply type \texttt{python} (followed by a space) and drag the file into the Terminal window from Finder. % DOESN'T WORK YET

You can leave this running throughout the different experiments (i.e. leave the Terminal open).

To start the test, open the browser and type

\texttt{localhost:8000}

and hit enter. The test should start (see Figure \ref{fig:test}).

To quit the server, either close the Terminal window or press Ctrl+C on your keyboard to forcibly shut it down.
\subsubsection{Windows}

On Windows, Python 2.7 is not generally preinstalled, so it has to be downloaded\footnote{\url{https://www.python.org/downloads/windows/}} and installed before you can run scripts such as the local webserver, which is necessary if the tool is hosted locally.

Simply double-click the Python script \texttt{pythonServer.py} in the folder you downloaded.

You may see a warning like the one in Figure \ref{fig:warning}. Click `Allow access'.

\begin{figure}[htbp]
\begin{center}
\includegraphics[width=.6\textwidth]{warning.png}
\caption{Windows: Potential warning message when executing \texttt{pythonServer.py}.}
\label{fig:warning}
\end{center}
\end{figure}

The process should now start in the Command Prompt window that opens - see Figure \ref{fig:python}.

\begin{figure}[htbp]
\begin{center}
\includegraphics[width=.75\textwidth]{python.png}
\caption{Windows: The Command Prompt after running \texttt{pythonServer.py} and opening the corresponding website.}
\label{fig:python}
\end{center}
\end{figure}

You can leave this running throughout the different experiments (i.e. leave the Command Prompt open).

To start the test, open the browser and type

\texttt{localhost:8000}

and hit enter. The test should start (see Figure \ref{fig:test}).

\begin{figure}[htb]
\begin{center}
\includegraphics[width=.8\textwidth]{test.png}
\caption{The start of the test in Google Chrome on Windows 7.}
\label{fig:test}
\end{center}
\end{figure}

If at any point in the test the participant reports unexpected behaviour or an error of some kind, or the test needs to be interrupted, please notify the experimenter and/or refer to Section \ref{sec:troubleshooting}.

When the test is over (the subject should see a message to that effect, and click `Submit' one last time), the output XML file containing all collected data should have appeared in `saves/'. The names of these files are `test-0.xml', `test-1.xml', etc., in ascending order. The Terminal or Command Prompt running the local webserver displays the name of the next file to be written. If such a file did not appear, please again refer to Section \ref{sec:troubleshooting}.

It is advised that you back up these results as often as possible, as a loss of this data means that the time and effort spent by the subject(s) has been in vain. Save the results to an external or network drive, and/or send them to the experimenter regularly.

To start the test again for a new participant, you do not need to close the browser or shut down the Terminal or Command Prompt. Simply refresh the page or go to \texttt{localhost:8000} again.

\subsection{Remote test}
Put all files on a web server which supports PHP. This allows the `save.php' script to store the XML result files in the `saves/' folder.

Make sure the \texttt{projectReturn} attribute of the \texttt{setup} node is set to the \texttt{save.php} script.

Then, just go to the URL of the corresponding HTML file, e.g. \texttt{http://server.com/path/to/WAET/index.html?url=test/my-test.xml}. If storing on the server doesn't work at submission (e.g. if the \texttt{projectReturn} attribute isn't properly set), the result XML file is presented to the subject on the client side, as a `Save file' link.
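For instance, the opening tag of the \texttt{setup} node could include the following (the \texttt{interface} attribute is explained in the next section; adjust the path to wherever \texttt{save.php} resides on your server):

\texttt{<setup interface="APE" projectReturn="save.php">}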


\clearpage

\section{Interfaces}

The Web Audio Evaluation Tool comes with a number of interface styles, each of which can be customised extensively, either by configuring them differently using the many optional features, or by modifying the JavaScript files.

To set the interface style for the whole test, %Nick? change when this is not the case anymore, i.e. when the interface can be set per page
add \texttt{interface="APE"} to the \texttt{setup} node, where \texttt{"APE"} is one of the interface names below.

\subsection{APE}
The APE interface is based on \cite{ape} and consists of one or more axes, each corresponding to an attribute to be rated, on which markers are placed. As such, it is a multiple stimulus interface where (for each dimension or attribute) all elements are on one axis, so that they can be maximally compared against each other, as opposed to being rated individually or against a single reference.
It also contains an optional text box for each element, to allow for clarification by the subject, tagging, and so on.

\subsection{MUSHRA}
This is a straightforward implementation of \cite{mushra}, especially common for the rating of audio quality, for instance in the evaluation of audio codecs.


\clearpage

\section{Features}

This section goes over the different features implemented in the Web Audio Evaluation Tool, how to use them, and what to know about them.

Unless otherwise specified, \emph{each} feature described here is optional, i.e. it can be enabled or disabled and adjusted to some extent.

As the example project showcases (nearly) all of these features, please refer to its configuration XML document for a demonstration of how to enable and adjust them.

\subsection{Surveys}
\subsubsection{Pre- and post-page surveys}

\subsubsection{Pre- and post-test surveys}

\subsubsection{Survey elements}
All survey elements (which `pop up' in the centre of the browser) have an \texttt{id} attribute, for retrieval of the responses in post-processing of the results, and a \texttt{mandatory} attribute, which if set to ``true'' requires the subject to respond before they can continue.

\begin{description}
\item[statement] Simply shows text to the subject until `Next' or `Start' is clicked.
\item[question] Expects a text answer (in a text box). Has the \texttt{boxsize} argument: set to ``large'' or ``huge'' for a bigger box size.
\item[number] Expects a numerical value. Attribute \texttt{min="0"} specifies the minimum value - in this case the answer must be strictly positive before the subject can continue.
\item[radio] Radio buttons.
\item[checkbox] Checkboxes. Note that when making a checkbox question ``mandatory'', the subject is forced to select at least one option (which could be e.g. `Other' or `None').\\
\end{description}

\textbf{Example usage:}\\

\lstset{
basicstyle=\ttfamily,
columns=fullflexible,
showstringspaces=false,
commentstyle=\color{grey}\upshape
}

\lstdefinelanguage{XML}
{
morestring=[b]",
morestring=[s]{>}{<},
morecomment=[s]{<?}{?>},
stringstyle=\color{black} \bfseries,
identifierstyle=\color{darkblue} \bfseries,
keywordstyle=\color{cyan} \bfseries,
morekeywords={xmlns,version,type},
breaklines=true% list your attributes here
}
\scriptsize
\lstset{language=XML}

\begin{lstlisting}
<PostTest>
<question id="location" mandatory="true" boxsize="large">Please enter your location. (example mandatory text question)</question>
<number id="age" min="0">Please enter your age (example non-mandatory number question)</number>
<radio id="rating">
<statement>Please rate this interface (example radio button question)</statement>
<option name="bad">Bad</option>
<option name="poor">Poor</option>
<option name="good">Good</option>
<option name="great">Great</option>
</radio>
<checkbox id="checkboxtest" mandatory="true">
<statement>Please select with which activities you have any experience (example checkbox question)</statement>
<option name="musician">Playing a musical instrument</option>
<option name="soundengineer">Recording or mixing audio</option>
</checkbox>
<statement>Thank you for taking this listening test. Please click 'Submit' and your results will appear in the 'saves/' folder.</statement>
</PostTest>
\end{lstlisting}


\subsection{Randomisation}

\subsubsection{Randomisation of configuration XML files}
% how to
% explain how this is implemented in the pythonServer
%Nick? already implemented in the PHP?


\subsubsection{Randomisation of page order}


\subsubsection{Randomisation of axis order}

\subsubsection{Randomisation of fragment order}


\subsubsection{Randomisation of initial slider position}

% /subsubsection{Randomisation of survey question order}
% should be an attribute of the individual 'pretest' and 'posttest' elements
% uncomment once we have it

\subsection{Looping}
Loops the fragments on the page in synchrony.
% how to enable?
If the fragments are not of equal length initially, they are padded with zeros so that they are of equal length, enabling looping without the fragments going out of sync relative to each other.

Note that in looped mode, fragments cannot be played until all fragments are loaded, as the engine needs to know the length of every fragment before it can pad and synchronise them. %Nick? Is this accurate?
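As a sketch, and assuming looping is controlled by a \texttt{loop} attribute of the \texttt{audioholder} node (check the Project Specification Document for the exact attribute name), this could look as follows:

\texttt{<audioholder id="looped-page" loop="true">}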

\subsection{Sample rate}
If you require the test to be conducted at a certain sample rate (i.e. you do not tolerate resampling of the elements to match the system's sample rate), add \texttt{sampleRate="96000"} - where ``96000'' can be any supported sample rate - so that a warning message alerts the subject when the system's sample rate differs from this enforced sample rate. This of course means that within one test, all sample rates must be equal, as it is impossible to change the system's sample rate during the test (even if you were to change it manually, the browser would have to be restarted for the change to take effect).

\subsection{Scrubber bar}
The scrubber bar, or transport bar (the visualisation of the playhead, with an indication of time and of the portion of the file played so far), is at this point purely visual, and not a controller to adjust the playhead position.

Make it visible by adding \texttt{<option name='playhead'/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks).

\subsection{Metrics}
Enable the collection of metrics by adding \texttt{collectMetrics="true"} to the \texttt{setup} node.

The \texttt{Metric} node, which contains the metrics to be tracked during the complete test, is a child of the \texttt{setup} node, and it could look as follows.

\begin{lstlisting}
<Metric>
<metricEnable>testTimer</metricEnable>
<metricEnable>elementTimer</metricEnable>
<metricEnable>elementInitialPosition</metricEnable>
<metricEnable>elementTracker</metricEnable>
<metricEnable>elementFlagListenedTo</metricEnable>
<metricEnable>elementFlagMoved</metricEnable>
<metricEnable>elementListenTracker</metricEnable>
</Metric>
\end{lstlisting}

When in doubt, err on the inclusive side, as one never knows which information will be needed in the future. Most of these metrics are necessary for post-processing scripts such as timeline\_view\_movement.py.

\subsubsection{Time test duration}
\texttt{testTimer}\\

\subsubsection{Time fragment playback}
\texttt{elementTimer}\\
This keeps track of when each fragment is played back and stopped again, \emph{and} which part of the fragment has been played back at that time.
% example output?

\subsubsection{Initial positions}
\texttt{elementInitialPosition}\\
Tracks the initial position of the sliders, especially relevant when these are randomised, so that their influence on the final ratings can be examined.

\subsubsection{Track movements}

\subsubsection{Which fragments listened to}

\subsubsection{Which fragments moved}
Binary check of whether or not the marker corresponding to a particular fragment was moved at all throughout the experiment.

\subsubsection{elementListenTracker} %Nick? No idea what this does, if it's not what I wrote under 'Time fragment playback'

\subsection{References and anchors}
\subsubsection{Reference}
%...
\subsubsection{Hidden reference}
%...
\subsubsection{Hidden anchor}
%...

\subsection{Checks}
\label{sec:checks}

%blabla
These checks are enabled in the \texttt{interface} node, which is a child of the \texttt{setup} node.
\subsubsection{Playback checks}
% what it does/is
Enforce playing each fragment at least once, for at least a little bit (this check is satisfied even if only a tiny portion of the file is played), by alerting the user to which fragments have not been played upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test before playing each fragment at least once.
% how to enable/disable

Alternatively, one can check whether the \emph{entire} fragment was listened to at least once.
% how to enable

Add \texttt{<check name="fragmentPlayed"/>} to the \texttt{interface} node.


\subsubsection{Movement check}
Enforce moving each marker at least once, even if only by a little, by alerting the user to which fragments have not been moved upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test before moving each marker at least once.
If there are several axes, the warning will specify which fragments have to be moved on which axis.

Add \texttt{<check name="fragmentMoved"/>} to the \texttt{interface} node.

\subsubsection{Comment check}
% How to enable/disable?

Enforce commenting, by alerting the user to which fragments have not been commented on upon clicking `Submit'. When enabled, one cannot proceed to the next page, answer a survey question, or finish the test before putting at least one character in each comment box.

Note that this does not apply to any extra (text, radio button, checkbox) elements, unless these have the `mandatory' option enabled. %Nick? is this extra 'mandatory' option implemented?

Add \texttt{<check name="fragmentComments"/>} to the \texttt{interface} node.

%ADD: how to add a custom comment box

\subsubsection{Scale use check}
It is possible to enforce a certain usage of the scale, meaning that at least one slider needs to be below and/or above a certain percentage of the scale.

Add \texttt{<check name="scalerange" min="25" max="75"/>} to the \texttt{interface} node.
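For reference, an \texttt{interface} node enabling all of the checks above, together with the scrubber bar and page count options described elsewhere in this section, could look as follows (a sketch combining the check and option names above; enable only the subset you need):

\begin{lstlisting}
<interface>
<check name="fragmentPlayed"/>
<check name="fragmentMoved"/>
<check name="fragmentComments"/>
<check name="scalerange" min="25" max="75"/>
<option name="playhead"/>
<option name="page-count"/>
</interface>
\end{lstlisting}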

\subsubsection{Note on the use of multiple rating axes}
I.e. what if more than one axis? How to specify which axis the checks relate to? %Nick? to add?

\subsection{Layout options}
\texttt{title}, \texttt{scale}, \texttt{position}, \texttt{commentBoxPrefix}

\subsection{Multiple sliders}
(APE example)

\begin{lstlisting}
<interface name="preference">
<title>Preference</title>
<scale position="0">Min</scale>
<scale position="100">Max</scale>
<scale position="50">Middle</scale>
<commentBoxPrefix>Comment on fragment</commentBoxPrefix>
</interface>
<interface name="depth">
<title>Depth</title>
<scale position="0">Low</scale>
<scale position="100">High</scale>
<scale position="50">Middle</scale>
<commentBoxPrefix>Comment on fragment</commentBoxPrefix>
</interface>
\end{lstlisting}
where the \texttt{interface} nodes are children of the \texttt{audioholder} node.

\subsection{Platform information}
% what does it do, what does it look like
% limitations?

\subsection{Show progress}
Add \texttt{<option name="page-count"/>} to the \texttt{interface} node (see Section \ref{sec:checks}: Checks) to add the current page number and the total number of pages to the interface.

\subsection{Gain}
It is possible to set the gain (in decibels) applied to the different audio elements, as an attribute of the \texttt{audioelement} nodes in the configuration XML file:

\texttt{<audioElements url="sample-01.wav" gain="-6" id="sample01quieter" />}

\subsection{Loudness}
% automatic loudness equalisation
% guide to loudness.js
Set the loudness for a complete test by adding \texttt{loudness="-23"} to the \texttt{setup} node in the configuration XML file, where -23 is an example loudness in LUFS.
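As a summary sketch, a \texttt{setup} node combining several of the attributes discussed in these instructions might look as follows. The values are illustrative, and we assume here that \texttt{sampleRate} is likewise a \texttt{setup} attribute:

\begin{lstlisting}
<setup interface="APE" projectReturn="save.php" collectMetrics="true" sampleRate="44100" loudness="-23">
\end{lstlisting}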

\clearpage


\section{Using the test create tool}
We provide a test creation tool, available in the directory test\_create. This tool is a self-contained web page, so double-clicking it will launch the page in your system default browser.

The test creation tool can help you build a simple test very quickly. By simply selecting your interface and clicking checkboxes you can build a test in minutes.

Include audio by dragging and dropping the stimuli you wish to include.

The tool examines your XML before exporting to ensure you do not export an invalid XML structure which would crash the test.

This guide will help you construct your own interface on top of the WAET (Web Audio Evaluation Tool) engine. The WAET engine resides in the core.js file, which contains prototype objects to handle most of the test creation, operation and data collection. The interface simply has to link into this at the correct points.

\subsection{Nodes to familiarise}
core.js handles several very important nodes which you should become familiar with. The first is the audio engine, initialised and stored in the variable `AudioEngineContext'. This handles the playback of the web audio nodes as well as storing the `AudioObjects'. The `AudioObjects' are custom nodes which hold the audio fragments for playback. These nodes also have a link to two interface objects: the comment box, if enabled, and the interface providing the ranking. On creation of an `AudioObject' the interface link is nulled; it is up to the interface to link these correctly.

The specification document will be decoded and parsed into an object called `specification'. This holds all of the specification's various nodes. The test pages and any pre/post test objects are processed by a test state which proceeds through the test when called to by the interface. Any checks (such as playback or movement checks) are to be completed by the interface before instructing the test state to proceed. The test state will call the interface on each page load with the page specification node.

\subsection{Modifying \texttt{core.js}}
Whilst very little code is actually needed, you do need to instruct core.js to load your interface file when it is called for in a specification node. There is a function called `loadProjectSpecCallback' which handles the decoding of the specification and the setting of any external items (such as metric collection). At the very end of this function there is an if statement; add your interface string to this list to link to the source. There is an example in there for both the APE and MUSHRA tests already included. Note: any updates to core.js in future work will most likely overwrite your changes to this file, so remember to check that your interface is still there after any update that interferes with core.js.
Any further files can be loaded here as well, such as CSS styling files. jQuery is already included.

\subsection{Building the Interface}
Your interface file will get loaded automatically when the `interface' attribute of the setup node matches the string in the `loadProjectSpecCallback' function. The following functions must be defined in your interface file.
\begin{itemize}
\item \texttt{loadInterface} - Called once when the document is parsed. This creates any necessary bindings, such as to the metric collection classes and any check commands. Here you can also start the structure for your test, such as placing in any common nodes (such as the title and empty divs to drop content into later).
\item \texttt{loadTest(audioHolderObject)} - Called for each page load. The audioHolderObject contains a specification node holding effectively one of the audioHolder nodes.
\item \texttt{resizeWindow(event)} - Handle for any window resizing. Simply scale your interface accordingly. This function must be present, but can be an empty function.
\end{itemize}

\subsubsection{loadInterface}
This function is called once the document has been parsed, since some browsers may parse files asynchronously. The best method is simply to put `loadInterface()' at the top of your interface file, so that the function is called as soon as the JavaScript engine is ready.

By default the HTML file has an element with id ``topLevelBody'' where you can build your interface. Make sure you blank the contents of that object. This function is the perfect time to build any fixed items, such as the page title, session titles, interface buttons (Start, Stop, Submit) and any holding and structure elements for later on.

At the end of the function, insert these two function calls: testState.initialise() and testState.advanceState();. This will actually begin the test sequence, including the pre-test options (if any are included in the specification document).

\subsubsection{loadTest(audioHolderObject)}
This function is called on each new test page. It is this function's job to clear out the previous test page and set up the new one. Use the function audioEngineContext.newTestPage(); to instruct the audio engine to prepare for a new page. ``audioEngineContext.audioObjects = [];'' will delete any audioObjects, interfaceContext.deleteCommentBoxes(); will delete any comment boxes and interfaceContext.deleteCommentQuestions(); will delete any extra comment boxes specified by commentQuestion nodes.

This function will need to instruct the audio engine to build each fragment. Simply passing the constructor each element from the audioHolderObject will build the track: audioEngineContext.newTrack(element) (where element is the audioHolderObject audio element). This will return a reference to the constructed audioObject. Decoding of the audio will happen asynchronously.

You also need to link audioObject.interfaceDOM with your interface object for that audioObject. The interfaceDOM object has a few default methods. Firstly, it must start disabled and become enabled once the audioObject has decoded the audio (function call: enable()). Next, it must have a function exportXMLDOM(), which returns the XML node for your interface; the default is for it to return a value node, with textContent equal to the normalised value. You can do things differently, but our scripts may not work if something different is specified (as it will breach our results specifications). Finally, it must also have a method getValue, which returns the normalised value.

It is also the job of the interfaceDOM to call any metric collection functions necessary; however, some functions may be better placed outside it (for example, the APE interface uses drag and drop, therefore the best way was to call the metric functions from the dragEnd function, which is called when the interface object is dropped). Metrics based upon listening are handled by the audioObject. The interfaceDOM object must manage any movement metrics. For a list of valid metrics and their behaviours, see the Project Specification Document included in the repository (docs/). The same goes for any checks required when pressing the submit button, or any other method to proceed through the test state.
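To summarise the above, a minimal interface file might be structured as follows. This is only a sketch assembled from the function calls named in this section; the property name \texttt{audioElements} and the helper object \texttt{MyInterfaceObject} are illustrative, not part of the engine.

\begin{lstlisting}[language={}]
// myinterface.js - minimal skeleton (illustrative only)

// Per the text above, the call can sit at the top of the file;
// function declarations are hoisted, so it runs once the engine is ready.
loadInterface();

function loadInterface() {
    // Build any fixed structure inside the element with id "topLevelBody":
    // page title, Start/Stop/Submit buttons, holding divs, ...

    // Begin the test sequence, including any pre-test options:
    testState.initialise();
    testState.advanceState();
}

function loadTest(audioHolderObject) {
    // Clear out the previous test page:
    audioEngineContext.newTestPage();
    audioEngineContext.audioObjects = [];
    interfaceContext.deleteCommentBoxes();
    interfaceContext.deleteCommentQuestions();

    // Build one track per fragment; audio decoding happens asynchronously.
    // (The property name "audioElements" is an assumption.)
    for (var i = 0; i < audioHolderObject.audioElements.length; i++) {
        var audioObject = audioEngineContext.newTrack(audioHolderObject.audioElements[i]);
        // Link your own interface object here, e.g.:
        // audioObject.interfaceDOM = new MyInterfaceObject(audioObject);
    }
}

function resizeWindow(event) {
    // Must exist, but may be empty.
}
\end{lstlisting}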

\clearpage
\section{Analysis and diagnostics}
\subsection{In the browser}
See `analyse.html' in the main folder: immediate visualisation of (by default) all results in the `saves/' folder.

\subsection{Python scripts}
The package includes Python (2.7) scripts (in `scripts/') to extract ratings and comments, generate visualisations of ratings and timelines, and produce a fully fledged report.

Visualisation requires the free matplotlib toolbox (\url{http://matplotlib.org}), as well as numpy and scipy.
By default, the scripts can be run from the `scripts' folder, with the result files in the `saves' folder (the default location where result XMLs are stored). Each script takes the XML file folder as an argument, along with other arguments in some cases.
Note: to avoid all kinds of problems, please avoid using spaces in file and folder names (this may work on some systems, but others don't like it).
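For example, to extract the ratings from all result files in the default location, run the following from the `scripts/' folder (pass a different folder as the argument if your results live elsewhere):

\texttt{python score\_parser.py ../saves/}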

\subsubsection{comment\_parser.py}
Extracts comments from the output XML files corresponding to the different subjects found in `saves/'. It creates a folder per `audioholder'/page it finds, and stores a CSV file with comments for every `audioelement'/fragment within these respective `audioholders'/pages. In this CSV file, every line corresponds to a subject/output XML file. Depending on the settings, the first column, containing the name of the corresponding XML file, can be omitted (for anonymisation).
Beware of Excel: sometimes the UTF-8 is not properly imported, leading to problems with special characters in the comments (particularly cumbersome for foreign languages).

\subsubsection{evaluation\_stats.py}
Shows a few statistics of the tests in the `saves/' folder so far, mainly for checking for errors. Shows the number of files that are there, the audioholder IDs that were tested (and how many of each separate ID), the duration of each page, the duration of each complete test, the average duration per page, and the average duration as a function of the page number.

\subsubsection{generate\_report.py}
Similar to `evaluation\_stats.py', but generates a PDF report based on the output files in the `saves/' folder - or any folder specified as a command line argument. Uses pdflatex to write a LaTeX document, then converts it to a PDF.

\subsubsection{score\_parser.py}
Extracts rating values from the XML to CSV - necessary for running the visualisation of ratings. Creates the folder `saves/ratings/' if it does not exist yet, to which it writes a separate file for every `audioholder'/page in any of the output XMLs it finds in `saves/'. Within each file, rows represent different subjects (output XML file names) and columns represent different `audioelements'/fragments.

\subsubsection{score\_plot.py}
Plots the ratings as stored in the CSVs created by score\_parser.py.
Depending on the settings, it displays and/or saves (in `saves/ratings/') a boxplot, confidence interval plot, scatter plot, or a combination of the aforementioned.
Requires the free matplotlib library.
At this point, more than one subject is needed for this script to work.

\subsubsection{timeline\_view\_movement.py}
Creates a timeline for every subject, for every `audioholder'/page, corresponding to any of the output XML files found in `saves/'. It shows the marker movements of the different fragments, along with when each fragment was played (red regions). Automatically takes fragment names, rating axis title, rating axis labels, and audioholder name from the XML file (if available).

\subsubsection{timeline\_view.py} % should be omitted or absorbed by the above soon
Creates a timeline for every subject, for every `audioholder'/page, corresponding to any of the output XML files found in `saves/'. It shows when and for how long the subject listened to each of the fragments.

\clearpage
\section{Troubleshooting} \label{sec:troubleshooting}
\subsection{Reporting bugs and requesting features}
Thanks to feedback from the use of the interface in experiments by the authors and others, many bugs have been caught, and fatal crashes due to the interface now seem to be a thing of the past entirely.

We continually develop this tool to fix issues and implement features useful to us or our user base. See \url{https://code.soundsoftware.ac.uk/projects/webaudioevaluationtool/issues} for a list of feature requests and bug reports, and their status.

Please contact the authors if you experience any bugs, if you would like additional functionality, if you spot any errors or gaps in the documentation, if you have questions about using the interface, or if you would like to give any feedback (even positive!) about the interface. We look forward to learning how the tool has (not) been useful to you.


\subsection{First aid}
Meanwhile, if things do go wrong or the test needs to be interrupted for whatever reason, not all data is lost. In a normal scenario, the test needs to be completed until the end (the final `Submit'), at which point the output XML is stored in the \texttt{saves/} folder. If this stage is not reached, open the JavaScript Console (see below for how to find it) and type

\texttt{createProjectSave()}

to present the result XML file on the client side, or

\texttt{createProjectSave(specification.projectReturn)}

to try to store it at the specified location, e.g. the `saves/' folder on the web server or the local machine (on failure, the result XML should be presented directly in the web browser instead),

and hit enter. This will open a pop-up window with a hyperlink that reads `Save File'; click it and an XML file with the results up to that point should be stored in your download folder.

Alternatively, a lot of data can be read from the same console, to which the tool prints a lot of debug information. Specifically:
\begin{itemize}
\item the randomisation of pages and fragments is logged;
\item any time a fragment is played, its ID and the time stamp (in seconds since the start of the test) are displayed;
\item any time a slider is dragged and dropped, the location where it is dropped and the time stamp are shown;
\item any comments and pre- or post-test questions and their answers are logged as well.
\end{itemize}

You can select all of this and save it into a text file, so that none of this data is lost. You may choose to do this even when a test was successful, as an extra precaution.

If you encounter any issue which you believe to be caused by any aspect of the tool, and/or which the documentation does not mention, please do let us know!

\subsubsection*{Opening the JavaScript Console}
\begin{itemize}
\item In Google Chrome, the JavaScript Console can be found in \textbf{View$>$Developer$>$JavaScript Console}, or via the keyboard shortcut Cmd + Alt + J (Mac OS X).
\item In Safari, the JavaScript Console can be found in \textbf{Develop$>$Show Error Console}, or via the keyboard shortcut Cmd + Alt + C (Mac OS X). Note that for the Developer menu to be visible, you have to go to Preferences (Cmd + ,) and enable `Show Develop menu in menu bar' in the `Advanced' tab. \textbf{Note that as long as the Developer menu is not visible, nothing is logged to the console, i.e. you will only be able to see diagnostic information from the moment you switched on the Developer tools onwards.}
\item In Firefox, go to \textbf{Tools$>$Web Developer$>$Web Console}, or hit Cmd + Alt + K.
\end{itemize}

\subsection{Known issues and limitations}
\label{sec:knownissues}

The following is a non-exhaustive list of problems and limitations you may experience using this tool, due to features not yet being supported by us, by the Web Audio API, and/or by (some) browsers.

\begin{itemize}
\item Issue \href{https://code.soundsoftware.ac.uk/issues/1463}{\textbf{\#1463}}: \textbf{Firefox} only supports 8 bit and 16 bit WAV files. Pending automatic requantisation (which deteriorates the audio signal's dynamic range to some extent), WAV format stimuli need to adhere to these limitations in order for the test to be compatible with Firefox.
\item Issues \href{https://code.soundsoftware.ac.uk/issues/1474}{\textbf{\#1474}} and \href{https://code.soundsoftware.ac.uk/issues/1462}{\textbf{\#1462}}: On occasion, audio is not working - or only a continuous `beep' can be heard - notably in \textbf{Safari}. Refreshing, quitting the browser and even enabling Developer tools in Safari's Preferences pane (`Advanced' tab: ``Show `Develop' menu in menu bar'') has helped resolve this. If no (high quality) audio can be heard, make sure your entire playback system's settings are all correct.
\end{itemize}

\clearpage
\bibliographystyle{ieeetr}
\bibliography{Instructions}{}


\clearpage
\appendix

\section{Legacy}
The APE interface and most of the functionality of the first WAET editions are inspired by the APE toolbox for MATLAB \cite{ape}. See \url{https://code.soundsoftware.ac.uk/projects/ape} for the source code and \url{http://brechtdeman.com/publications/aes136.pdf} for the corresponding paper.

\clearpage

\section{Listening test instructions example}

Before each test, show the instructions below or similar, and make sure they are available to the subject throughout the test. Make sure to ask whether the participant has any questions upon seeing and/or reading the instructions.

\begin{itemize}
\item You will be asked for your name (``John Smith'') and location (room identifier).
\item An interface will appear, where you are asked to
\begin{itemize}
\item click green markers to play the different mixes;
\item drag the markers on a scale to reflect your preference for the mixes;
\item comment on these mixes, using text boxes with corresponding numbers (in your \textbf{native language});
\item optionally comment on all mixes together, or on the song, in `General comments'.
\end{itemize}
\item You are asked for your personal, honest opinion. Feel free to use the full range of the scale to convey your opinion of the various mixes. Don't be afraid to be harsh and direct.
\item The markers appear at random positions at first (which means some markers may hide behind others).
\item The interface can take a few seconds to start playback, but switching between mixes should be instantaneous.
\item This is a research experiment, so please forgive us if things go wrong. Let us know immediately and we will fix it or restart the test.
\item When the test is finished (after all songs have been evaluated), just call the experimenter; do NOT close the window.
\item After the test, please fill out our survey about your background, experience and feedback on the test.
\item By participating, you consent to us using all collected data for research. Unless asked explicitly, all data will be anonymised when shared.
\end{itemize}

\clearpage

\section{Terminology} % just to keep track of what exactly we call things. Don't use terms that are too different, to avoid confusion.
As a guide to better understand these Instructions, and to expand them later, here is a list of terms that may be unclear or ambiguous unless properly defined.
\begin{description}
\item[Subject] The word we use for a participant, user, ... of the test, i.e. not the experimenter who designs the test but the person who evaluates the audio under test as part of an experiment (or the preparation of one).
\item[User] The person who uses the tool to configure, run and analyse the test, i.e. the experimenter, most likely a researcher, or at least the person administering the test.
\item[Page] A screen in a test; corresponds with an \texttt{audioholder}
\item[Fragment] An element or sample in a test; corresponds with an \texttt{audioelement}
\item[Test] A complete test which can consist of several pages; corresponds with an entire configuration XML file
\item[Configuration XML file] The XML file containing the necessary information on interface, samples, survey questions, configurations, ... which the JavaScript modules read to produce the desired test.
\item[Results XML file] The output of a successful test, including ratings, comments, survey responses, timing information, and the complete configuration XML file with which the test was generated in the first place.
\end{description}

\clearpage

\setcounter{secnumdepth}{0} % don't number this last bit
\section{Contact details} % maybe add web pages, Twitter accounts, whatever you like
\label{sec:contact}

\begin{itemize}
\item Nicholas Jillings: \texttt{nicholas.jillings@mail.bcu.ac.uk}
\item Brecht De Man: \texttt{b.deman@qmul.ac.uk}
\item David Moffat: \texttt{d.j.moffat@qmul.ac.uk}
\end{itemize}

\end{document}