annotate docs/ProjectSpecificationDocument.tex @ 672:ef643c350f56

Updated the interface modification to the specification document. Updated example XML at bottom of the page
author Nicholas Jillings <n.g.r.jillings@se14.qmul.ac.uk>
date Sat, 11 Apr 2015 11:45:04 +0100
parents 0a401224660b
children 89e08a7e0b6b
rev   line source
n@656 1 \documentclass{article}
n@656 2
n@656 3 \usepackage[margin=2cm]{geometry}
n@656 4 \usepackage{listings}
n@656 5
n@656 6 \begin{document}
n@656 7
n@656 8 \large APE Browser Tool - Project Specification Document
n@656 9
n@656 10 \section{Document}
n@656 11
n@656 12 An XML file containing all project information to load and execute the project on the client. Certain interfaces are optional, however others are mandatory. This guide should reflect the changes in the XML project and keep track of the versions. Hopefully this can remain simple!
n@656 13
n@656 14 \section{Root}
n@656 15
n@656 16 The XML root must be \texttt{<BrowserEvalProjectDocument>}. This should be sufficiently identifiable in both itself and in the JavaScript decoding as it will create an object called the root name.
n@656 17
n@656 18 There must also be a \texttt{<version>} tag which has the attribute \texttt{id} containing a numerical representation of the version. Currently everything in this document can be assumed to be version 1. If future updates or corrections are made post delivery this should give the flexibility to ensure past projects still work.
n@656 19
n@656 20 The root will also contain the following tags: setup and audioHolder.
n@656 21
n@656 22 \section{Setup tag}
n@656 23
n@656 24 The setup tag specifies certain global test settings including: the interface type to use, the project return location and any other setup instructions.
n@656 25
n@656 26 An example of this tag could be:
n@656 27
n@656 28 \texttt{<setup interface="APE" projectReturn="http://project.return.url/goes/here" />}
n@656 29
n@656 30 The setup tag may also contain the PreTest, PostTest and Metric tags as children (see the example below).
n@656 31
n@656 32 \subsection{Attributes}
n@656 33 \begin{itemize}
n@656 34 \item \texttt{interface} - Mandatory, String. Defaults to APE, otherwise use to load any of the available interfaces. Currently the only valid string is APE.
n@656 35 \item \texttt{projectReturn} - Mandatory, String. Specify the URL to return the test results. If null client will generate XML locally and prompt user to return the file.
n@656 36 \item \texttt{randomiseOrder} - Optional, default to false. Specify if the order of the tests can be randomised.
n@656 37 \item \texttt{collectMetrics} - Optional, Boolean. Default to false. Determine if the test metrics should be collected. These include how long each test session took etc. The full metrics list can be modified in the 'metrics' tag.
n@656 38 \end{itemize}
n@656 39
n@656 40 \subsection{Elements}
n@656 41 None
n@656 42
n@656 43 \section{AudioHolder tag}
n@656 44
n@656 45 There should be one audioHolder tag per test session, inside which each audioElement is specified as children. The audioHolder tag can help to generalise certain objects. Each audioHolder instance specifies a separate listening test to be paged, each with their own specific requirements.
n@656 46
n@656 47 \subsection{Attributes}
n@656 48 \begin{itemize}
n@656 49 \item \texttt{id} - Mandatory, String. Give an ID string or number to identify the test in the result.
n@656 50 \item \texttt{hostURL} - Optional, String. If all tracks are hosted from the same folder on a server, you can put the leading portion of the URL here. For instance, if loading http://test.com/tracks/track1.wav and http://test.com/tracks/track2.wav, this could equal http://test.com/tracks/ and the url attribute in the track tag can be track1.wav or track2.wav. Equally http://test.com/ and then using tracks/track1.wav and tracks/track2.wav is valid.
n@656 51 \item \texttt{sampleRate} - Optional, Number. If your test requires a specific sample rate, this should be set to the desired sample rate in Hertz. This does not set the browser to the correct sample rate, but forces the browser to check the sample rate matches. If this is undefined, no sample rate matching will occur.
n@656 52 \item \texttt{randomiseOrder} - Optional, Boolean String. Defaults to false. Determine if the track order should be randomised. Must be true or false.
n@656 53 \item \texttt{repeatCount} - Optional, Number. Defaults to 0 (ie: no repeats). The number of times a test should be repeated.
n@656 54 \end{itemize}
n@656 55
n@656 56 \subsection{Elements}
n@656 57 Contain the audioElements tags and the interfaceSetup tag.
n@656 58
n@656 59 \section{audioElements tag}
n@656 60
n@656 61 This must reside as children in the audioHolder tag. There must be one audioElement tag per sound sample to load into the test.
n@656 62
n@656 63 \subsection{Attributes}
n@656 64 \begin{itemize}
n@656 65 \item \texttt{url} - Mandatory, String. Contain the full URL to the track. If the Tracks tag hostURL is set, concatenate this tag with the hostURL attribute to obtain the full URL.
n@656 66 \item \texttt{ID} - Optional, Number. Give the track a specific ID for the return. This will help if using multiple projects to spread a test across multiple sessions and/or locations, where each test will not use all the samples. If one audioElement is given the ID 3, the next audioElement (assuming it does not have an ID set itself) will have the ID of 4. This continues until the next audioElement with the ID attribute set is reached.
n@656 67 \end{itemize}
n@656 68
n@672 69 \section{interface tag}
n@656 70
n@672 71 This is contained within the audioHolder tag and outlines test instance specific requirements. These include the following children tags:
n@672 72 \begin{itemize}
n@672 73 \item 'title' - Contains the test title to be shown at the top of the page
n@672 74 \item 'scale' - Takes the attribute position to be a value between 0 and 100 indicating where on the scale to place the text contained inside.
n@672 75 \end{itemize}
n@656 76
n@656 77 \section{CommentQuestion tag}
n@656 78
n@656 79 This is a 1st level tag (same level as AudioHolder and setup). This allows another question and comment box to be presented on the page. The results of these are passed back in the results XML with both the comment and the question.
n@656 80
n@656 81 \subsection{Attributes}
n@656 82 None.
n@656 83
n@656 84 \subsection{Elements}
n@656 85 The question to be presented.
n@656 86
n@656 87 \section{PreTest tag and PostTest tag}
n@656 88
n@656 89 These are 1st level tags. The PreTest tag allows for the specifying of pre test instructions and questions. These appear as a pop-up style window with next buttons and other automatic GUI. The postTest tag allows for specifying post test instructions, questions and resources. These appear as a pop-up style window after the submit button is pressed.
n@656 90
n@656 91 \subsection{Attributes}
n@656 92 None.
n@656 93
n@656 94 \subsection{Elements}
n@656 95 Takes the \texttt{statement} and \texttt{question} tags. The order these are presented in the XML define the order they appear on the screen.
n@656 96
n@656 97 \subsubsection{Statement}
n@656 98
n@656 99 The statement tag simply prints the included string verbatim on a 'pop-up' window with a next button.
n@656 100
n@656 101 \subsubsection{Question}
n@656 102
n@656 103 This allows for a question to be asked pre/post the test. This is added to the response XML in the same location as the other common/global questions. The response includes both the question asked and the response. This takes two attributes, id and mandatory. ID is a mandatory field. The same ID will be used in the results so it is important it is properly entered. Mandatory is optional. True means the field must be entered before continuing.
n@656 104
n@656 105 \subsubsection{Resource}
n@656 106
n@656 107 The resource tag is only available in the postTest tag. This allows for the linking to some external resource via the href attribute.
n@656 108
n@656 109 \section{Metric tag}
n@656 110 A 1st level tag, metrics must be declared in the setup tag. This takes a set of children 'metricEnable' to define which metrics to collect and present.
n@656 111
n@656 112 \subsection{metricEnable tag}
n@656 113 This takes a single attribute to determine which metric to enable for collection. Some of these are global, per track or per test instance.
n@656 114 \begin{itemize}
n@656 115 \item testTimer - Return the global test timer and test instance timers. Measures the time between the first start and final submit.
n@656 116 \item elementTimer - Return the total time each audioElement in each test was listened to. Measures time between successive clicks on the track changer.
n@656 117 \item elementTracker - Return the initial position of each track
n@656 118 \item elementTrackerFull - Return an enumerated pair of time and position. Track the entire movement of each element position. NOTE: Will override the elementTracker option above and throw an error into the browser console.
n@656 119 \item elementFlagListenedTo - Return a boolean per element to see if the element was listened to.
n@656 120 \item elementFlagMoved - Return a boolean per element to see if the element slider was moved.
n@656 121 \item elementFlagComments - Return a boolean per element to see if the element has comments.
n@656 122 \end{itemize}
n@656 123
n@656 124 \section{Feature List}
n@656 125 \begin{itemize}
n@656 126 \item Paging listening tests - e.g.\ ask multiple questions in each experiment
n@656 127 \item Labels on X axis - scale
n@656 128 \item Input questions/comment at top to guide towards the question being asked.
n@656 129 \item Randomise track numbers -(inc. comment boxes and relate back to correct reference track)
n@656 130 \item Randomise order of individual tests
n@656 131 \item Save output XML file to remote server
n@656 132 \item Tests Metrics
n@656 133 \begin{itemize}
n@656 134 \item Duration of listening to each track
n@656 135 \item Time spent on each individual test
n@656 136 \item Start and end position of every track
n@656 137 \item Flags on each track, to check whether each track meets the following (though these may not restrict users from submitting)
n@656 138 \begin{itemize}
n@656 139 \item Has been listened to
n@656 140 \item Has been moved
n@656 141 \item Has comments about it
n@656 142 \end{itemize}
n@656 143 \end{itemize}
n@656 144 \end{itemize}
n@656 145
n@656 146 \subsection{Advanced feature list}
n@656 147 \begin{itemize}
n@656 148 \item Repeat each test a number of times (2 or 3?) to remove learning / experience bias and ensure that the order is consistent
n@656 149 \item Perform Loudness equalisation on all tracks
n@656 150 \item Selection of test type
n@656 151 \item Pre-test of some basic hearing test
n@656 152 \begin{itemize}
n@656 153 \item MUSHRA (with vertical slider per track)
n@656 154 \item APE (Single horizontal slider)
n@656 155 \item AB Test
n@656 156 \end{itemize}
n@656 157 \end{itemize}
n@656 158
n@656 159
n@656 160
n@656 161 \section{Example}
n@656 162
n@656 163 Here is an example XML structure
n@656 164
n@656 165 \begin{lstlisting}
n@656 166 <?xml version="1.0" encoding="utf-8"?>
n@656 167 <BrowserEvalProjectDocument>
n@672 168 <setup interface="APE" projectReturn="null" randomiseOrder='true' collectMetrics='true'>
n@672 169 <PreTest>
n@672 170 <statement>Please listen to all mixes</statement>
n@672 171 <question id="location" mandatory="true">Please enter your listening location</question>
n@672 172 </PreTest>
n@672 173 <PostTest>
n@672 174 <statement>Thank you for taking this listening test.</statement>
n@672 175 <question id="SessionID">Please enter your name.</question>
n@672 176 </PostTest>
n@672 177 <Metric>
n@672 178 <metricEnable>testTimer</metricEnable>
n@672 179 <metricEnable>elementTimer</metricEnable>
n@672 180 <metricEnable>elementTracker</metricEnable>
n@672 181 <metricEnable>elementFlagListenedTo</metricEnable>
n@672 182 <metricEnable>elementFlagMoved</metricEnable>
n@672 183 </Metric>
n@672 184 </setup>
n@672 185 <audioHolder id='0' hostURL="example_eval/" sampleRate="44100" randomiseOrder='true' repeatCount='1'>
n@672 186 <interface>
n@672 187 <title>Example Test Question</title>
n@672 188 <scale position="0">Min</scale>
n@672 189 <scale position="100">Max</scale>
n@672 190 <scale position="50">Middle</scale>
n@672 191 <scale position="20">20</scale>
n@672 192 </interface>
n@672 193 <audioElements url="0.wav" id="0"/>
n@672 194 <audioElements url="1.wav" id="1"/>
n@672 195 <audioElements url="2.wav" id="2"/>
n@672 196 <audioElements url="3.wav" id="3"/>
n@672 197 <audioElements url="4.wav" id="4"/>
n@672 198 <audioElements url="5.wav" id="5"/>
n@672 199 <audioElements url="6.wav" id="6"/>
n@672 200 <audioElements url="7.wav" id="7"/>
n@672 201 <audioElements url="8.wav" id="8"/>
n@672 202 <audioElements url="9.wav" id="9"/>
n@672 203 <audioElements url="10.wav" id="10"/>
n@672 204 <CommentQuestion id='mixingExperience'>What is your mixing experience?</CommentQuestion>
n@672 205 <PreTest>
n@672 206 <statement>Start the Test 3</statement>
n@672 207 </PreTest>
n@672 208 <PostTest>
n@672 209 <statement>Please take a break before the next test</statement>
n@672 210 <question id="testComment">How did you find the test</question>
n@672 211 </PostTest>
n@672 212 </audioHolder>
n@656 213 </BrowserEvalProjectDocument>
n@656 214 \end{lstlisting}
n@656 215
n@656 216
n@656 217
n@656 218 \end{document}