annotate docs/ProjectSpecificationDocument.tex @ 656:0a401224660b

Added dev-main branch warning at top of files
author Nicholas Jillings <n.g.r.jillings@se14.qmul.ac.uk>
date Fri, 10 Apr 2015 10:25:52 +0100
parents
children ef643c350f56
rev   line source
n@656 1 \documentclass{article}
n@656 2
n@656 3 \usepackage[margin=2cm]{geometry}
n@656 4 \usepackage{listings}
n@656 5
n@656 6 \begin{document}
n@656 7
n@656 8 {\large APE Browser Tool - Project Specification Document}
n@656 9
n@656 10 \section{Document}
n@656 11
n@656 12 An XML file containing all project information to load and execute the project on the client. Certain interfaces are optional, however others are mandatory. This guide should reflect the changes in the XML project and keep track of the versions. Hopefully this can remain simple!
n@656 13
n@656 14 \section{Root}
n@656 15
n@656 16 The XML root must be \texttt{<BrowserEvalProjectDocument>}. This should be sufficiently identifiable in both itself and in the JavaScript decoding as it will create an object called the root name.
n@656 17
n@656 18 There must also be a \texttt{<version>} tag which has the attribute \texttt{id} containing a numerical representation of the version. Currently everything in this document can be assumed to be version 1. If future updates or corrections are made post delivery this should give the flexibility to ensure past projects still work.
n@656 19
n@656 20 The root will also contain the following tags: setup and tracks.
n@656 21
n@656 22 \section{Setup tag}
n@656 23
n@656 24 The setup tag specifies certain global test settings including: the interface type to use, the project return location and any other setup instructions.
n@656 25
n@656 26 An example of this tag could be:
n@656 27
n@656 28 \texttt{<setup interface="APE" projectReturn="http://project.return.url/goes/here" />}
n@656 29
n@656 30 The setup tag should not have any elements or any children.
n@656 31
n@656 32 \subsection{Attributes}
n@656 33 \begin{itemize}
n@656 34 \item \texttt{interface} - Mandatory, String. Defaults to APE, otherwise use to load any of the available interfaces. Currently the only valid string is APE.
n@656 35 \item \texttt{projectReturn} - Mandatory, String. Specify the URL to return the test results. If null client will generate XML locally and prompt user to return the file.
n@656 36 \item \texttt{randomiseOrder} - Optional, Boolean. Defaults to false. Specify if the order of the tests can be randomised.
n@656 37 \item \texttt{collectMetrics} - Optional, Boolean. Defaults to false. Determine if the test metrics should be collected. These include how long each test session took etc. The full metrics list can be modified in the 'metrics' tag.
n@656 38 \end{itemize}
n@656 39
n@656 40 \subsection{Elements}
n@656 41 None
n@656 42
n@656 43 \section{AudioHolder tag}
n@656 44
n@656 45 There should be one audioHolder tag per test session, inside which each audioElement is specified as children. The audioHolder tag can help to generalise certain objects. Each audioHolder instance specifies a separate listening test to be paged, each with their own specific requirements.
n@656 46
n@656 47 \subsection{Attributes}
n@656 48 \begin{itemize}
n@656 49 \item \texttt{id} - Mandatory, String. Give an ID string or number to identify the test in the result.
n@656 50 \item \texttt{hostURL} - Optional, String. If all tracks are hosted from the same folder on a server, you can put in the lead here. For instance, if loading http://test.com/tracks/track1.wav and http://test.com/tracks/track2.wav, this could equal http://test.com/tracks/ and the url attribute in the track tag can be track1.wav or track2.wav. Equally http://test.com/ and then using tracks/track1.wav and tracks/track2.wav is valid.
n@656 51 \item \texttt{sampleRate} - Optional, Number. If your test requires a specific sample rate, this should be set to the desired sample rate in Hertz. This does not set the browser to the correct sample rate, but forces the browser to check the sample rate matches. If this is undefined, no sample rate matching will occur.
n@656 52 \item \texttt{randomiseOrder} - Optional, Boolean String. Defaults to false. Determine if the track order should be randomised. Must be true or false.
n@656 53 \item \texttt{repeatCount} - Optional, Number. Defaults to 0 (ie: no repeats). The number of times a test should be repeated.
n@656 54 \end{itemize}
n@656 55
n@656 56 \subsection{Elements}
n@656 57 Contain the audioElements tags and the interfaceSetup tag.
n@656 58
n@656 59 \section{audioElements tag}
n@656 60
n@656 61 This must reside as children in the audioHolder tag. There must be one audioElement tag per sound sample to load into the test.
n@656 62
n@656 63 \subsection{Attributes}
n@656 64 \begin{itemize}
n@656 65 \item \texttt{url} - Mandatory, String. Contain the full URL to the track. If the Tracks tag hostURL is set, concatenate this tag with the hostURL attribute to obtain the full URL.
n@656 66 \item \texttt{ID} - Optional, Number. Give the track a specific ID for the return. This will help if using multiple projects to spread a test across multiple sessions and/or locations, where each test will not use all the samples. If one audioElement is given the ID 3, the next audioElement (assuming it does not have an ID set itself) will have the ID of 4. This continues until the next audioElement with the ID attribute set is reached.
n@656 67 \end{itemize}
n@656 68
n@656 69 \section{interfaceSetup}
n@656 70
n@656 71 This is contained within the audioHolder tag and outlines test instance specific requirements. These include the following children tags: title - question title at the top of the page, scaleMin - minimum scale value text, scaleMax - maximum scale value text, scaleMid - halfway scale value text. There is also a preTest tag here allowing for specific questions/statements to be presented before running this specific test.
n@656 72
n@656 73 \section{CommentQuestion tag}
n@656 74
n@656 75 This is a 1st level tag (same level as AudioHolder and setup). This allows another question and comment box to be presented on the page. The results of these are passed back in the results XML with both the comment and the question.
n@656 76
n@656 77 \subsection{Attributes}
n@656 78 None.
n@656 79
n@656 80 \subsection{Elements}
n@656 81 The question to be presented.
n@656 82
n@656 83 \section{PreTest tag and PostTest tag}
n@656 84
n@656 85 These are 1st level tags. The PreTest tag allows for the specifying of pre test instructions and questions. These appear as a pop-up style window with next buttons and other automatic GUI. The postTest tag allows for specifying post test instructions, questions and resources. These appear as a pop-up style window after the submit button is pressed.
n@656 86
n@656 87 \subsection{Attributes}
n@656 88 None.
n@656 89
n@656 90 \subsection{Elements}
n@656 91 Takes the \texttt{statement} and \texttt{question} tags. The order these are presented in the XML define the order they appear on the screen.
n@656 92
n@656 93 \subsubsection{Statement}
n@656 94
n@656 95 The statement tag simply prints the included string verbatim on a 'pop-up' window with a next button.
n@656 96
n@656 97 \subsubsection{Question}
n@656 98
n@656 99 This allows for a question to be asked pre/post the test. This is added to the response XML in the same location as the other common/global questions. The response includes both the question asked and the response. This takes two attributes, id and mandatory. ID is a mandatory field. The same ID will be used in the results so it is important it is properly entered. Mandatory is optional. True means the field must be entered before continuing.
n@656 100
n@656 101 \subsubsection{Resource}
n@656 102
n@656 103 The resource tag is only available in the postTest tag. This allows for the linking to some external resource via the href attribute.
n@656 104
n@656 105 \section{Metric tag}
n@656 106 A 1st level tag, metrics must be declared in the setup tag. This takes a set of children 'metricEnable' to define which metrics to collect and present.
n@656 107
n@656 108 \subsection{metricEnable tag}
n@656 109 This takes a single attribute to determine which metric to enable for collection. Some of these are global, per-track or per-test-instance.
n@656 110 \begin{itemize}
n@656 111 \item testTimer - Return the global test timer and test instance timers. Measures the time between the first start and final submit.
n@656 112 \item elementTimer - Return the total time each audioElement in each test was listened to. Measures the time between successive clicks on the track changer.
n@656 113 \item elementTracker - Return the initial position of each track
n@656 114 \item elementTrackerFull - Return an enumerated pair of time and position. Track the entire movement of each element position. NOTE: Will override the elementTracker option above and throw an error into the browser console.
n@656 115 \item elementFlagListenedTo - Return a boolean per element to see if the element was listened to.
n@656 116 \item elementFlagMoved - Return a boolean per element to see if the element slider was moved.
n@656 117 \item elementFlagComments - Return a boolean per element to see if the element has comments.
n@656 118 \end{itemize}
n@656 119
n@656 120 \section{Feature List}
n@656 121 \begin{itemize}
n@656 122 \item Paging listening tests - eg. Ask multiple questions in each experiment
n@656 123 \item Labels on X axis - scale
n@656 124 \item Input questions/comment at top to guide towards the question being asked.
n@656 125 \item Randomise track numbers -(inc. comment boxes and relate back to correct reference track)
n@656 126 \item Randomise order of individual tests
n@656 127 \item Save output XML file to remote server
n@656 128 \item Tests Metrics
n@656 129 \begin{itemize}
n@656 130 \item Duration of listening to each track
n@656 131 \item Time spent on each individual test
n@656 132 \item Start and end position of every track
n@656 133 \item Flags on each track, to ensure each track (but may not restrict users from submitting)
n@656 134 \begin{itemize}
n@656 135 \item Has been listened to
n@656 136 \item Has been moved
n@656 137 \item Has comments about it
n@656 138 \end{itemize}
n@656 139 \end{itemize}
n@656 140 \end{itemize}
n@656 141
n@656 142 \subsection{Advanced feature list}
n@656 143 \begin{itemize}
n@656 144 \item Repeat each test a number of times (2 or 3?) to remove learning / experience bias and to ensure that the order is consistent
n@656 145 \item Perform Loudness equalisation on all tracks
n@656 146 \item Selection of test type
n@656 147 \item Pre-test of some basic hearing test
n@656 148 \begin{itemize}
n@656 149 \item MUSHRA (with vertical slider per track)
n@656 150 \item APE (Single horizontal slider)
n@656 151 \item AB Test
n@656 152 \end{itemize}
n@656 153 \end{itemize}
n@656 154
n@656 155
n@656 156
n@656 157 \section{Example}
n@656 158
n@656 159 Here is an example XML structure
n@656 160
n@656 161 \begin{lstlisting}
n@656 162 <?xml version="1.0" encoding="utf-8"?>
n@656 163 <BrowserEvalProjectDocument>
n@656 164 <setup interface="APE" projectReturn="null" />
n@656 165 <AudioHolder hostURL="example_eval/" sampleRate="44100"
n@656 166 sampleRateExplicit="true">
n@656 167 <audioElements url="0.wav" ID="0"/>
n@656 168 <audioElements url="1.wav"/>
n@656 169 <audioElements url="2.wav"/>
n@656 170 <audioElements url="3.wav"/>
n@656 171 <audioElements url="4.wav"/>
n@656 172 <audioElements url="5.wav"/>
n@656 173 <audioElements url="6.wav"/>
n@656 174 <audioElements url="7.wav"/>
n@656 175 <audioElements url="8.wav"/>
n@656 176 <audioElements url="9.wav"/>
n@656 177 <audioElements url="10.wav"/>
n@656 178 </AudioHolder>
n@656 179 <CommentQuestion>What is your mixing experience</CommentQuestion>
n@656 180 <PreTest>
n@656 181 <statement>Please listen to all mixes</statement>
n@656 182 </PreTest>
n@656 183 <PostTest>
n@656 184 <statement>Thank you for taking this listening test.</statement>
n@656 185 <question>Please enter your name.</question>
n@656 186 </PostTest>
n@656 187 </BrowserEvalProjectDocument>
n@656 188 \end{lstlisting}
n@656 189
n@656 190
n@656 191
n@656 192 \end{document}