changeset 726:949185ac04a9

Paper: bits of introduction and remote testing, minor edits
author Brecht De Man <BrechtDeMan@users.noreply.github.com>
date Tue, 29 Sep 2015 23:55:10 +0200
parents 16bafae452a8
children 3b637867eafe
files docs/WAC2016/WAC2016.bib docs/WAC2016/WAC2016.pdf docs/WAC2016/WAC2016.tex
diffstat 3 files changed, 90 insertions(+), 15 deletions(-)
--- a/docs/WAC2016/WAC2016.bib	Tue Sep 29 20:50:53 2015 +0200
+++ b/docs/WAC2016/WAC2016.bib	Tue Sep 29 23:55:10 2015 +0200
@@ -1,13 +1,47 @@
 %% This BibTeX bibliography file was created using BibDesk.
 %% http://bibdesk.sourceforge.net/
 
-%% Created for Brecht De Man at 2015-09-29 20:37:59 +0200 
+%% Created for Brecht De Man at 2015-09-29 21:47:35 +0200 
 
 
 %% Saved with string encoding Unicode (UTF-8) 
 
 
 
+@book{bech,
+	Annote = {p 115: GLS
+	- desired sample population
+	- normal hearing acuity (C4DM-wide test?)
+	- sensitive to audio quality characteristics
+	- ability to repeatedly rate stimuli consistently
+	- available for performing listening tests
+- web based questionnaire
+- pure tone audiometry (?)
+- screening experiments => able to find pairs?
+
+p 125
+no audiometric measure can discriminate between naive and experienced listener
+listeners will have different strategies for evaluation: care should be exercised when averaging across listeners
+previous listening skills = important
+
+p 126
+ability direct influence on statistical resolution of test
+matching test (at the same time familiarisation): 80% at least
+
+p 167
+intra-subject reliability},
+	Author = {Bech, S. and Zacharov, N.},
+	Date-Added = {2015-09-29 19:47:28 +0000},
+	Date-Modified = {2015-09-29 19:47:28 +0000},
+	Isbn = {9780470869246},
+	Keywords = {psychophysics, perception, listening tests, perceptual evaluation},
+	Publisher = {John Wiley \& Sons},
+	Read = {1},
+	Title = {Perceptual Audio Evaluation - Theory, Method and Application},
+	Url = {http://books.google.co.uk/books?id=1WGPJai1gX8C},
+	Year = {2007},
+	Bdsk-Url-1 = {http://books.google.co.uk/books?id=1WGPJai1gX8C}}
+
 @conference{schoeffler2015mushra,
 	Author = {Schoeffler, Michael and St{\"o}ter, Fabian-Robert and Edler, Bernd and Herre, J{\"u}rgen},
 	Booktitle = {1st Web Audio Conference},
@@ -15,7 +49,7 @@
 	Date-Modified = {2015-09-29 18:37:59 +0000},
 	Title = {Towards the Next Generation of Web-based Experiments: {A} Case Study Assessing Basic Audio Quality Following the {ITU-R} Recommendation {BS}. 1534 ({MUSHRA})},
 	Year = {2015},
-	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUGJCVYJHZlcnNpb25YJG9iamVjdHNZJGFyY2hpdmVyVCR0b3ASAAGGoKgHCBMUFRYaIVUkbnVsbNMJCgsMDxJXTlMua2V5c1pOUy5vYmplY3RzViRjbGFzc6INDoACgAOiEBGABIAFgAdccmVsYXRpdmVQYXRoWWFsaWFzRGF0YV8QOi4uLy4uLy4uLy4uL0dvb2dsZSBEcml2ZS9Eb2N1bWVudHMvUGFwZXJzL3dhYzE1X211c2hyYS5wZGbSFwsYGVdOUy5kYXRhTxEBtgAAAAABtgACAAAMTWFjaW50b3NoIEhEAAAAAAAAAAAAAAAAAAAA0Fxdh0grAAAACl8REHdhYzE1X211c2hyYS5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAESaBzSMJQiAAAAAAAAAAAABAAEAAAJIAAAAAAAAAAAAAAAAAAAAAZQYXBlcnMAEAAIAADQXEFnAAAAEQAIAADSMHgCAAAAAQAUAApfEQAKXrsACl0CAAlEXgACk9UAAgBNTWFjaW50b3NoIEhEOlVzZXJzOgBCcmVjaHQ6AEdvb2dsZSBEcml2ZToARG9jdW1lbnRzOgBQYXBlcnM6AHdhYzE1X211c2hyYS5wZGYAAA4AIgAQAHcAYQBjADEANQBfAG0AdQBzAGgAcgBhAC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgA7VXNlcnMvQnJlY2h0L0dvb2dsZSBEcml2ZS9Eb2N1bWVudHMvUGFwZXJzL3dhYzE1X211c2hyYS5wZGYAABMAAS8AABUAAgAN//8AAIAG0hscHR5aJGNsYXNzbmFtZVgkY2xhc3Nlc11OU011dGFibGVEYXRhox0fIFZOU0RhdGFYTlNPYmplY3TSGxwiI1xOU0RpY3Rpb25hcnmiIiBfEA9OU0tleWVkQXJjaGl2ZXLRJidUcm9vdIABAAgAEQAaACMALQAyADcAQABGAE0AVQBgAGcAagBsAG4AcQBzAHUAdwCEAI4AywDQANgCkgKUApkCpAKtArsCvwLGAs8C1ALhAuQC9gL5Av4AAAAAAAACAQAAAAAAAAAoAAAAAAAAAAAAAAAAAAADAA==}}
+	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUGJCVYJHZlcnNpb25YJG9iamVjdHNZJGFyY2hpdmVyVCR0b3ASAAGGoKgHCBMUFRYaIVUkbnVsbNMJCgsMDxJXTlMua2V5c1pOUy5vYmplY3RzViRjbGFzc6INDoACgAOiEBGABIAFgAdccmVsYXRpdmVQYXRoWWFsaWFzRGF0YV8QOi4uLy4uLy4uLy4uL0dvb2dsZSBEcml2ZS9Eb2N1bWVudHMvUGFwZXJzL3dhYzE1X211c2hyYS5wZGbSFwsYGVdOUy5kYXRhTxEBtgAAAAABtgACAAAMTWFjaW50b3NoIEhEAAAAAAAAAAAAAAAAAAAA0Fxdh0grAAAACl8REHdhYzE1X211c2hyYS5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAESjIbSMJQiAAAAAAAAAAAABAAEAAAJIAAAAAAAAAAAAAAAAAAAAAZQYXBlcnMAEAAIAADQXEFnAAAAEQAIAADSMHgCAAAAAQAUAApfEQAKXrsACl0CAAlEXgACk9UAAgBNTWFjaW50b3NoIEhEOlVzZXJzOgBCcmVjaHQ6AEdvb2dsZSBEcml2ZToARG9jdW1lbnRzOgBQYXBlcnM6AHdhYzE1X211c2hyYS5wZGYAAA4AIgAQAHcAYQBjADEANQBfAG0AdQBzAGgAcgBhAC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgA7VXNlcnMvQnJlY2h0L0dvb2dsZSBEcml2ZS9Eb2N1bWVudHMvUGFwZXJzL3dhYzE1X211c2hyYS5wZGYAABMAAS8AABUAAgAN//8AAIAG0hscHR5aJGNsYXNzbmFtZVgkY2xhc3Nlc11OU011dGFibGVEYXRhox0fIFZOU0RhdGFYTlNPYmplY3TSGxwiI1xOU0RpY3Rpb25hcnmiIiBfEA9OU0tleWVkQXJjaGl2ZXLRJidUcm9vdIABAAgAEQAaACMALQAyADcAQABGAE0AVQBgAGcAagBsAG4AcQBzAHUAdwCEAI4AywDQANgCkgKUApkCpAKtArsCvwLGAs8C1ALhAuQC9gL5Av4AAAAAAAACAQAAAAAAAAAoAAAAAAAAAAAAAAAAAAADAA==}}
 
 @conference{ape,
 	Author = {De Man, Brecht and Joshua D. Reiss},
Binary file docs/WAC2016/WAC2016.pdf has changed
--- a/docs/WAC2016/WAC2016.tex	Tue Sep 29 20:50:53 2015 +0200
+++ b/docs/WAC2016/WAC2016.tex	Tue Sep 29 23:55:10 2015 +0200
@@ -1,4 +1,5 @@
 \documentclass{sig-alternate}
+\usepackage{hyperref}
 
 \begin{document}
 
@@ -125,7 +126,19 @@
 
 
 \section{Introduction}
-	Introducing the paper. Referring to \cite{waet}. Talking about what we do in the various sections of this paper. Pointing out that the header of the paper kind of looks like the Bat-sign. 
+
+	% Listening tests/perceptual audio evaluation: what are they, why are they important
+	% As opposed to limited scope of WAC15 paper: also musical features, realism of sound effects / sound synthesis, performance of source separation and other algorithms... 
+	Perceptual evaluation of audio, in the form of listening tests, is a powerful way to assess anything from audio codec quality and the realism of sound synthesis to the performance of source separation, automated music production and other algorithms. 
+	In less technical areas, the framework of a listening test can be used to measure emotional response to music or test cognitive abilities. % maybe some references? If there's space.
+
+	% Why difficult? Challenges? What constitutes a good interface? 
+	Designing a good listening test interface poses technical challenges, and requires careful attention to user friendliness and reliability. 
+
+	Note that the design of an effective listening test poses many further challenges unrelated to interface design, which are beyond the scope of this paper \cite{bech}. 
+
+	% Why in the browser? 
+	The Web Audio API has made essential features, such as sample-level manipulation of audio streams, possible in the browser \cite{schoeffler2015mushra}.
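+	As a minimal, purely illustrative sketch (the stimulus URL and variable names are placeholders, not part of the tool), sample-level access to a decoded stimulus takes only a few lines of JavaScript:
+\begin{verbatim}
+// Hypothetical sketch: decode a stimulus and read its raw samples
+var ctx = new AudioContext();
+fetch('stimulus.wav')
+  .then(function (response) { return response.arrayBuffer(); })
+  .then(function (data) { return ctx.decodeAudioData(data); })
+  .then(function (buffer) {
+    var samples = buffer.getChannelData(0); // Float32Array of PCM samples
+    // e.g. apply sample-accurate fades or compute level statistics here
+  });
+\end{verbatim}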
 	
 	Situating the Web Audio Evaluation Tool between other currently available evaluation tools, ...
 	
@@ -135,12 +148,12 @@
         \begin{center}
         	\begin{tabular}{|*{6}{l|}}
         		\hline
-        		\textbf{Name} 	& \textbf{Ref.} 	& \textbf{Language} 	& \textbf{Interfaces} 			& \textbf{Remote} 			& \textbf{Programming} 	\\
+        		\textbf{Name} 	& \textbf{Ref.} 	& \textbf{Language} 	& \textbf{Interfaces} 			& \textbf{Remote} 			& \textbf{Setup via UI} 	\\
         		\hline
-        		APE 			& \cite{ape}		& MATLAB				& multiple stimulus one axis 	& 							& \checkmark			\\
+        		APE 			& \cite{ape}		& MATLAB				& multiple stimulus one axis 	& 							& 			\\
         		BeaqleJS		& \cite{beaqlejs}	& JavaScript			& 								& not natively supported 	& \\
-        		HULTI-GEN 		& \cite{hultigen}	& MAX 					& 								& 							& \\
-        		\textbf{WAET}	& \cite{waet}		& JavaScript			& \textbf{all of the above}		& \checkmark				& \\
+        		HULTI-GEN 		& \cite{hultigen}	& MAX 					& 								& 							& \checkmark \\
+        		\textbf{WAET}	& \cite{waet}		& JavaScript			& \textbf{all of the above}		& \checkmark				& \checkmark \\
         		\hline
         	\end{tabular}
         \end{center}
@@ -151,25 +164,38 @@
         ... However, BeaqleJS \cite{beaqlejs} does not make use of the Web Audio API, %requires programming knowledge... 
 
         % 
-        Selling points: remote tests, visualisaton, create your own test in the browser, many interfaces
+        Selling points: remote tests, visualisation, create your own test in the browser, many interfaces, few/no dependencies, flexibility
+
+        As recruiting participants can be very time-consuming, and as some tests require a large number of participants, browser-based tests are an attractive way to reach a wider pool of subjects \cite{schoeffler2015mushra}. However, to our knowledge, no tool currently exists that allows the creation of a remotely accessible listening test. % I wonder what you can do with Amazon Mechanical Turk and the likes. 
+
+        [Talking about what we do in the various sections of this paper. Referring to \cite{waet}. ]
 
 	
-\section{Architecture}  % title? 'back end'?
+\section{Architecture}  % title? 'back end'? % NICK
 	A slightly technical overview of the system. Talk about XML, JavaScript, Web Audio API, HTML5. 
 	Describe and/or visualise audioholder-audioelement-... structure. 
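+	By way of illustration only, a hypothetical sketch of traversing such a setup document in the browser (the element name spellings and the `url' attribute are assumptions, not the tool's actual schema):
+\begin{verbatim}
+// Hypothetical sketch: walk an audioholder/audioelement setup XML
+// (element and attribute names assumed for illustration)
+fetch('test-setup.xml')
+  .then(function (response) { return response.text(); })
+  .then(function (text) {
+    var setup = new DOMParser().parseFromString(text, 'text/xml');
+    var holders = setup.getElementsByTagName('audioholder');
+    for (var i = 0; i < holders.length; i++) {
+      var elements = holders[i].getElementsByTagName('audioelement');
+      for (var j = 0; j < elements.length; j++) {
+        var url = elements[j].getAttribute('url'); // one stimulus file
+      }
+    }
+  });
+\end{verbatim}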
 
-	% see also SMC12
+	% see also SMC12 - less detail here
+
+	Which type of files? % WAV, anything else? Perhaps not exhaustive list, but say something along the lines of 'whatever browser supports'
 	
 	Streaming audio? % probably not, unless it's easy
 
-	Compatibility? 
+	Compatibility? % not IE, everything else fine? 
+
+
 	
 	
 \section{Remote tests} % with previous? 
+
+	If the experimenter is willing to trade some degree of control for a higher number of participants, the test can be hosted on a web server so that subjects can take part remotely. This way, a link can be shared widely in the hope of attracting a large number of subjects, although listening conditions and subject reliability may be less ideal. However, a sound system calibration page and a wide range of metrics logged during the test help mitigate these problems. Note also that in some experiments it may even be preferable for the subject to use a familiar, `real life' listening set-up, for instance when perceived quality differences on everyday sound systems are investigated. 
+	Furthermore, a fully browser-based test, where results are collected automatically, is more efficient and technically reliable even when the test takes place under lab conditions.
+
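+	As a purely hypothetical sketch (the script name `save.php' and the result document built below are placeholders, not the tool's actual format), automatic collection could amount to posting the result XML to a server-side script:
+\begin{verbatim}
+// Hypothetical sketch: POST a result XML document to a collecting script.
+// 'save.php' and the XML structure are placeholders for illustration.
+var doc = document.implementation.createDocument(null, 'results', null);
+doc.documentElement.setAttribute('subject', 'anonymous');
+var body = new XMLSerializer().serializeToString(doc);
+var xhr = new XMLHttpRequest();
+xhr.open('POST', 'save.php', true);
+xhr.setRequestHeader('Content-Type', 'text/xml');
+xhr.onload = function () {
+  if (xhr.status === 200) { console.log('Results stored.'); }
+};
+xhr.send(body);
+\end{verbatim}
+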
 	The following features allow easy and effective remote testing: 
 	\begin{itemize}
 		\item PHP script to collect result XML files
 		\item Randomly pick specified number of audioholders
+		\item Calibration
 		\item Functionality to participate multiple times
 			\begin{itemize}
 				\item Possible to log in with unique ID (no password)
@@ -181,12 +207,18 @@
 			\end{itemize}
 		\item Intermediate saves
 		\item Collect IP address information (privacy issues?) --> geo-related API? 
+		\item Time measurement % see before
 	\end{itemize}
 
 	
 \section{Interfaces} % title? 'Front end'? % Dave
 	`Build your own test'
 
+	Elements are present to build any of the following interfaces, and many more: axes, markers, labels, anchors, references, a reference signal button, a stop button, comment boxes, radio buttons, checkboxes, and a transport/scrubber bar. 
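+	Purely as a generic illustration (this is not the tool's actual code), two of these building blocks expressed with plain DOM calls:
+\begin{verbatim}
+// Generic illustration, not the tool's implementation:
+// a rating marker (slider) and a comment box.
+var marker = document.createElement('input');
+marker.type = 'range';   // position along a rating axis
+marker.min = '0';
+marker.max = '100';
+
+var comment = document.createElement('textarea'); // free-text comment box
+comment.placeholder = 'Describe what you hear';
+
+document.body.appendChild(marker);
+document.body.appendChild(comment);
+\end{verbatim}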
+
+	Established tests (see below) included as `presets' in the build-your-own-test page. 
+
+
 	We could add more interfaces, such as: 
 	\begin{itemize}
 		\item (APE style) \cite{ape}
@@ -243,6 +275,7 @@
 	A screenshot would be nice. 
 
 \section{Analysis and diagnostics}
+	% don't mention Python scripts
 	It would be great to have easy-to-use analysis tools to visualise the collected data and even do science with it. Even better would be to have all this in the browser. Complete perfection would be achieved if and when only limited setup, installation time, and expertise are required for the average non-CS researcher to use this. 
 	
 	The following could be nice: 
@@ -264,13 +297,21 @@
 	Some pictures here please. 
 
 \section{Concluding remarks and future work}
-	Perhaps an `engineering brief' such as this one doesn't really have a lot of conclusion, except `We made this'. 
 	
-	You can check it out at \url{code.soundsoftware.ac.uk/projects/webaudioevaluationtool}. 
+	The code and documentation can be pulled or downloaded from \url{code.soundsoftware.ac.uk/projects/webaudioevaluationtool}. 
 	
-	Talking a little bit about what else might happen. Unless we really want to wrap this up. 
+	[Talking a little bit about what else might happen. Unless we really want to wrap this up. ]
 
-	Use \cite{schoeffler2015mushra} as a `checklist'.
+	Use \cite{schoeffler2015mushra} as a `checklist', even though it only considers subjective evaluation of audio systems (and focuses on the requirements for a MUSHRA test).
+		% remote
+		% language support (not explicitly stated)
+		% crossfades
+		% choosing speakers/sound device from within browser? 
+		% collect information about software and sound system
+		% buttons, scales, ... UI elements
+		% must be able to load uncompressed PCM
+
+	[What can we not do? `Method of adjustment', as in \cite{schoeffler2015mushra}, is another can of worms, because many parameters could be adjusted (volume is just one of them, and could be implemented quite easily). Same for using input signals such as the participant's voice. Either leave out, or mention that this requires modification of the code we provide.]
 
 %
 % The following two commands are all you need in the