changeset 62:e2b9ccb92973

updating things
author christopherh <christopher.harte@eecs.qmul.ac.uk>
date Mon, 27 Apr 2015 16:30:13 +0100
parents bae079ff88e2
children a68a4e73b3ae
files SMC2015latex/csong.bib SMC2015latex/images/framework.pdf SMC2015latex/images/framework.svg SMC2015latex/section/background.tex SMC2015latex/section/dataset.tex SMC2015latex/section/framework.tex SMC2015latex/section/introduction.tex SMC2015latex/syncopation_toolkit.tex
diffstat 8 files changed, 128 insertions(+), 91 deletions(-)
--- a/SMC2015latex/csong.bib	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/csong.bib	Mon Apr 27 16:30:13 2015 +0100
@@ -948,12 +948,19 @@
 }
 
 @misc {Song14URL,
-	author = {Chunyang Song},
+	author = {Chunyang Song and Christopher Harte and Marcus Pearce},
 	title = {C4{DM} Syncopation Dataset and Toolkit},
 	year = {2014},
 	howpublished = {{{https://code.soundsoftware.ac.uk/projects/syncopation-dataset}}}
 }
 
+@PhdThesis{Song15thesis,
+author = {Chunyang Song},
+title = {Syncopation: Unifying Music Theory and Perception},
+school = {School of Electronic Engineering and Computer Science, Queen Mary, University of London},
+year = {2015},
+}
+
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
 %Evaluations
 @InProceedings{Gomez07,
Binary file SMC2015latex/images/framework.pdf has changed
--- a/SMC2015latex/images/framework.svg	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/images/framework.svg	Mon Apr 27 16:30:13 2015 +0100
@@ -24,14 +24,14 @@
      borderopacity="1.0"
      inkscape:pageopacity="0.0"
      inkscape:pageshadow="2"
-     inkscape:zoom="2.8284271"
-     inkscape:cx="172.42683"
-     inkscape:cy="616.87262"
+     inkscape:zoom="2"
+     inkscape:cx="321.90914"
+     inkscape:cy="623.49062"
      inkscape:document-units="px"
      inkscape:current-layer="layer1"
      showgrid="false"
      inkscape:window-width="1440"
-     inkscape:window-height="776"
+     inkscape:window-height="856"
      inkscape:window-x="0"
      inkscape:window-y="0"
      inkscape:window-maximized="1" />
@@ -43,7 +43,7 @@
         <dc:format>image/svg+xml</dc:format>
         <dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-        <dc:title />
+        <dc:title></dc:title>
       </cc:Work>
     </rdf:RDF>
   </metadata>
@@ -54,87 +54,104 @@
     <rect
        style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
        id="rect3765"
-       width="252.50005"
-       height="165.35716"
+       width="384.00003"
+       height="165.35718"
        x="101.20461"
        y="352.005"
        ry="0" />
     <path
        style="fill:#f2f2f2;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dashoffset:0"
-       d="m 166.56175,439.14789 41.29525,0 0,-47.85671 38.214,-4.3e-4 0,47.85714 42.27647,0 0,39.2857 -121.78572,0 z"
+       d="m 168.56177,439.14789 41.29525,0 0,-47.85671 165.714,-4.3e-4 0,47.85714 42.27647,0 0,39.2857 -249.28572,0 z"
        id="rect3755"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="ccccccccc" />
     <rect
        style="fill:#f2f2f2;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
        id="rect2985"
-       width="252.50002"
+       width="384"
        height="39.285706"
-       x="101.20461"
+       x="101.20463"
        y="478.43359"
        ry="0" />
     <text
        xml:space="preserve"
-       style="font-size:14.1577282px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
-       x="173.8896"
+       style="font-size:14.1577282px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
+       x="229.72226"
        y="503.08524"
        id="text3757"
        sodipodi:linespacing="125%"><tspan
          sodipodi:role="line"
          id="tspan3759"
-         x="173.8896"
-         y="503.08524">basic functions</tspan></text>
+         x="229.72226"
+         y="503.08524">basic_functions</tspan></text>
     <text
        sodipodi:linespacing="125%"
        id="text3761"
-       y="464.51382"
-       x="178.45215"
-       style="font-size:14.1577282px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       y="412.46429"
+       x="238.47263"
+       style="font-size:14.1577282px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
        xml:space="preserve"><tspan
-         y="464.51382"
-         x="178.45215"
+         y="412.46429"
+         x="238.47263"
          id="tspan3763"
-         sodipodi:role="line">music objects</tspan></text>
+         sodipodi:role="line">music_objects</tspan></text>
     <rect
        ry="0"
        y="352.005"
-       x="101.20461"
+       x="101.32963"
        height="39.285706"
-       width="252.50002"
+       width="383.75"
        id="rect3767"
        style="fill:#f2f2f2;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
     <text
        xml:space="preserve"
-       style="font-size:14.1577282px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
-       x="265.91846"
-       y="421.65668"
+       style="font-size:14.1577282px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
+       x="383.69052"
+       y="412.46429"
        id="text3775"
        sodipodi:linespacing="125%"><tspan
          sodipodi:role="line"
          id="tspan3777"
-         x="265.91846"
-         y="421.65668">file reader</tspan></text>
+         x="383.69052"
+         y="412.46429">file reader</tspan></text>
     <text
        sodipodi:linespacing="125%"
        id="text3779"
-       y="421.65668"
-       x="131.27559"
-       style="font-size:14.1577282px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       y="412.46429"
+       x="137.27559"
+       style="font-size:14.1577282px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
        xml:space="preserve"><tspan
-         y="421.65668"
-         x="131.27559"
+         y="412.46429"
+         x="137.27559"
          id="tspan3781"
          sodipodi:role="line">model</tspan></text>
     <text
        sodipodi:linespacing="125%"
        id="text3783"
        y="374.53537"
-       x="205.56129"
-       style="font-size:14.1577282px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="271.52029"
+       style="font-size:14.1577282px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
        xml:space="preserve"><tspan
          y="374.53537"
-         x="205.56129"
+         x="271.52029"
          id="tspan3785"
          sodipodi:role="line">synpy</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:15.80732155px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier 10 Pitch;-inkscape-font-specification:Courier 10 Pitch"
+       x="293.41452"
+       y="455.31256"
+       id="text2996"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         x="294.96283"
+         y="455.31256"
+         style="font-size:11.16515446px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Arial;-inkscape-font-specification:Arial"
+         id="tspan3002">Classes: Bar, BarList, NoteSequence, </tspan><tspan
+         sodipodi:role="line"
+         x="293.41452"
+         y="469.26901"
+         style="font-size:11.16515446px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Arial;-inkscape-font-specification:Arial"
+         id="tspan3008">VelocitySequence, TimeSignature, Note</tspan></text>
   </g>
 </svg>
--- a/SMC2015latex/section/background.tex	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/section/background.tex	Mon Apr 27 16:30:13 2015 +0100
@@ -4,26 +4,32 @@
 \subsection{Rhythm representation}
 \label{sec:background:rhythm}
 In this section, we introduce some key concepts to assist readers in understanding the mechanisms of 
-each syncopation model. Please refer to \cite{Song14thesis} for the detailed explanation all relevant rhythmic concepts in music theory and their mathematical notations.
+each syncopation model. Please refer to \cite{Song15thesis} for a detailed explanation of all relevant rhythmic concepts in music theory and their mathematical notation.
 
-\begin{figure}
-\centerline{\epsfig{figure=images/general3.pdf, width=\columnwidth}}
-\caption{\textbf{An example note sequence}. Two note events $\note_0$ and $\note_1$ occur in the time-span between time origin $\timeorigin$ and end time $\timeend$. The time-span duration $\timespan$ is three quarter-note periods. The rests at the start and end of the bar are not explicitly represented as objects in their own right here but as periods where no notes sound.}
+\begin{figure}[t]
+\centering
+\includegraphics[width=\columnwidth]{images/general3.pdf}
+\caption{An example note sequence. Two note events $\note_0$ and $\note_1$ occur in the time-span between time origin $\timeorigin$ and end time $\timeend$. The time-span duration $\timespan$ is three quarter-note periods. The rests at the start and end of the bar are not explicitly represented as objects in their own right here but as periods where no notes sound.}
 \label{fig:general}
 \end{figure}
 
+
+
 \subsubsection{Time-span}
 \label{sec:background:rhythm:timespan}
 The term \emph{time-span} has been defined as the period between two points in time, including all time points in between \cite{Lerdahl_Jackendoff83GTTM}. To represent a given rhythm, we must specify the time-span within which it occurs by defining a reference time origin $\timeorigin$ and end time $\timeend$, the total duration of which is $\timespan = \timeend-\timeorigin$ (Figure~\ref{fig:general}).
 
 The basic time unit is the \emph{tick} rather than the second; we therefore use the parameter Ticks Per Quarter-note (TPQ) to describe the time-span of a length of rhythm. The minimum TPQ is determined by the rhythm-pattern such that all of its events can be represented. As demonstrated in Figure~\ref{fig:clave}, the \emph{Son} clave rhythm pattern can be represented at both 8 and 4 ticks per quarter-note, but the minimum representable resolution is 4.
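+
+As a minimal sketch of this constraint (illustrative Python, not part of the toolkit itself), the minimum TPQ is simply the smallest resolution at which every onset falls exactly on the tick grid:
+\begin{minted}[frame=single,framesep=10pt]{python}
+def representable(onsets_in_quarternotes, tpq):
+    # True if every onset lands exactly on the
+    # tick grid at this resolution
+    return all(float(o * tpq).is_integer()
+               for o in onsets_in_quarternotes)
+
+# Son clave onsets in quarter-note units
+son_clave = [0, 0.75, 1.5, 2.5, 3]
+representable(son_clave, 8)  # True
+representable(son_clave, 4)  # True (minimum TPQ)
+representable(son_clave, 2)  # False: 2nd note off-grid
+\end{minted}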
 
-\begin{figure}
-\centerline{\epsfig{figure=images/clave_tpq.pdf, width=\columnwidth}}
-\caption{\textbf{The representation of \emph{Son} clave rhythm in different settings of Ticks Per Quarter-note (TPQ)}. Each quarter-note is represented by 8 and 4 ticks in (a) and (b) respectively, thus all the sounded notes are captured (highlighted by the blue circles); however in (c) where TQP is 2, the second note cannot be represented by this resolution.}
+\begin{figure}[t]
+\centering
+\includegraphics[width=0.85\columnwidth]{images/clave_tpq.pdf}
+\caption{The representation of the \emph{Son} clave rhythm at different settings of Ticks Per Quarter-note (TPQ). Each quarter-note is represented by 8 and 4 ticks in (a) and (b) respectively, thus all the sounded notes are captured (highlighted by the blue circles); however, in (c), where TPQ is 2, the second note cannot be represented at this resolution.}
 \label{fig:clave}
 \end{figure}
 
+
+
 \subsubsection{Note and rhythm}
 \label{sec:background:rhythm:note}
 A single \emph{note} event $\note$ occurring in this time-span may be described by the tuple $(\starttime, \durationtime, \velocity)$ as shown in Figure~\ref{fig:general}, where $\starttime$ represents the start or \emph{onset} time relative to $\timeorigin$, $\durationtime$ represents the note duration in the same units and $\velocity$ represents the note \emph{velocity} (i.e. the dynamic: how loud or accented the event is relative to others), where $\velocity > 0$.
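+
+As an illustration (a hypothetical helper, not one of the toolkit's classes), the note tuples of the \emph{Son} clave bar from Figure~\ref{ta:clave} can be rendered as a per-tick velocity sequence at four ticks per quarter-note:
+\begin{minted}[frame=single,framesep=10pt]{python}
+# (onset, duration, velocity) tuples, TPQ = 4
+notes = [(0,3,2),(3,1,1),(6,2,2),(10,2,1),(12,4,1)]
+
+def to_velocity_sequence(notes, length=16):
+    # velocity at each onset tick; 0 marks a rest
+    seq = [0] * length
+    for start, duration, velocity in notes:
+        seq[start] = velocity
+    return seq
+
+to_velocity_sequence(notes)
+# [2,0,0,1,0,0,2,0,0,0,1,0,1,0,0,0]
+\end{minted}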
@@ -59,29 +65,41 @@
 
 \subsubsection{Metrical structure and time-signature}
 \label{sec:background:rhythm:meter}
-\begin{figure}
-\centerline{\epsfig{figure=figs/ch_model/meter_hierarchy7.pdf, width=0.85\columnwidth}}
-\shortCap{Metrical hierarchies for different time-signatures.}{(a) A simple-duple hierarchy dividing the bar into two groups of two (as with a 4/4 time-signature). (b) A compound-duple hierarchy dividing a bar into two beats, each of which is subdivided by three (e.g. 6/8 time-signature). Reading the weights from left to right in any level $\metriclevel$ gives the elements in sequence $\metricvector_\metriclevel$}
+
+\begin{figure}[t]
+\centering
+\includegraphics[width=\columnwidth]{images/meter_hierarchy7.pdf}
+\caption{Metrical hierarchies for different time-signatures. (a) A simple-duple hierarchy dividing the bar into two groups of two (as with a 4/4 time-signature). (b) A compound-duple hierarchy dividing a bar into two beats, each of which is subdivided by three (e.g. a 6/8 time-signature). Reading the weights from left to right in any level $\metriclevel$ gives the elements in sequence $\metricvector_\metriclevel$.}
 \label{fig:meter-hierarchy}
 \end{figure}
 
+
+
 Isochronous meter is formed by a multi-level hierarchical metrical structure~\cite{Lerdahl_Jackendoff83GTTM, London04Meter}. As shown in Figure~\ref{fig:meter-hierarchy}, under a given metrical hierarchy, a bar is divided by a subdivision factor $\subdivision$ at each metrical level with index $\metriclevel$ where $\metriclevel \in [0, \levelmax]$. The list of subdivision factors is referred to as a \emph{subdivision sequence}.
 
 Events at different metrical positions vary in perceptual salience or \emph{metrical weight}~\cite{Palmer_Krumhansl90}. These weights may be represented as a \emph{weight sequence} $\metricweightset = \langle \metricweight_0, \metricweight_1, ... \metricweight_{\levelmax}\rangle$. The prevailing hypothesis for the assignment of weights in the metrical hierarchy is that a time point that exists in both the current metrical level and the level above has a \emph{strong} weight compared to time points that are not also present in the level above~\cite{Lerdahl_Jackendoff83GTTM}. The choice of values for the weights in $\metricweightset$ can vary between models, but the assignment of weights to nodes is common to all, as in~\cite{Lerdahl_Jackendoff83GTTM}.
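+
+To make the weight assignment concrete, the following sketch (illustrative code assuming LHL-style weights $\metricweightset = \langle 0,-1,-2, ... \rangle$; other models use different values) expands a subdivision sequence into per-tick weights, each tick taking the weight of the highest level at which it occurs:
+\begin{minted}[frame=single,framesep=10pt]{python}
+def tick_weights(subdivisions, weights):
+    # weights[l] is the weight for positions that
+    # first appear at metrical level l
+    n = 1
+    for s in subdivisions:
+        n *= s
+    grid, step = [None] * n, n
+    grid[0] = weights[0]         # bar onset, level 0
+    for level, s in enumerate(subdivisions, 1):
+        step //= s
+        for i in range(0, n, step):
+            if grid[i] is None:  # new at this level
+                grid[i] = weights[level]
+    return grid
+
+tick_weights([2, 2], [0, -1, -2])  # [0, -2, -1, -2]
+\end{minted}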
 
 \subsection{Syncopation models}
 \label{sec:background:models}
-In this section we give a brief review of each implemented syncopation model, including their general hypothesis, mechanism and scope of capabilities. 
+In this section we give a brief review of each implemented syncopation model, including its general hypothesis and mechanism. To compare the capabilities of the models, we give an overview of the musical features each captures in Table~\ref{ta:capabilities}. For a detailed review of these models see \cite{Song15thesis}.
 
 \subsubsection{Longuet-Higgins and Lee 1984 (\lhl)}
 \label{sec:background:models:lhl}
 
-Longuet-Higgins and Lee's \cite{LHL84} decomposes rhythm pattern into a tree structure from Section~\ref{sec:background:rhythm:meter} with metrical weights as $\metricweight_\metriclevel = -\metriclevel$ for all $\metricweight_\metriclevel \in \metricweightset$ i.e. $\metricweightset = \langle 0,-1,-2, ... \rangle$.
-The hypothesis of this model is that a syncopation occurs when a rest ($\RestNode$) in one metrical position follows a note ($\NoteNode$) in a weaker position.  Where such a note-rest pair occurs, the difference in their metrical weights is taken as a local syncopation score. Summing  the local scores produces the syncopation prediction for the whole rhythm sequence. 
+Longuet-Higgins and Lee's model \cite{LHL84} decomposes rhythm patterns into a tree structure as described in Section~\ref{sec:background:rhythm:meter} with metrical weights $\metricweight_\metriclevel = -\metriclevel$ for all $\metricweight_\metriclevel \in \metricweightset$ i.e. $\metricweightset = \langle 0,-1,-2, ... \rangle$.
+The hypothesis of this model is that a syncopation occurs when a rest ($\RestNode$) in one metrical position follows a note ($\NoteNode$) in a weaker position.  Where such a note-rest pair occurs, the difference in their metrical weights is taken as a local syncopation score. Summing the local scores produces the syncopation prediction for the whole rhythm sequence. 
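+
+A much-simplified sketch of the note-rest pairing idea on a flat weight grid (ignoring the tree decomposition and the bar-to-bar context that the full model handles) might read:
+\begin{minted}[frame=single,framesep=10pt]{python}
+def lhl_sketch(binary, weights):
+    # binary[i] == 1 for a note onset, 0 for a rest;
+    # weights as from the weight-grid sketch above
+    score = 0
+    for i, note in enumerate(binary):
+        if note != 1:
+            continue
+        for j in range(i + 1, len(binary)):
+            if binary[j] == 1:
+                break  # next onset reached, no pair
+            if weights[j] > weights[i]:
+                score += weights[j] - weights[i]
+                break  # rest on a stronger position
+    return score
+\end{minted}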
 
 \subsubsection{Pressing 1997 (\pressing)}
 \label{sec:background:models:prs}
-Pressing's cognitive complexity model~\cite{Pressing97,Pressing93} specifies six prototype binary sequences and ranks them in terms of \emph{cognitive cost}. For example, the \emph{filled} prototype that has a note in ever position of the sequence (e.g. $\langle 0,1,1,1,0,1,1,1 \rangle$) cost less than the \emph{syncopated} prototype that has a 0 in the first, strongest metrical position (e.g. $\langle 0,1,1,1,0,1,1,1 \rangle$)(refer~\cite{Song14thesis} for details). The model analyses the cost for the whole rhythm-pattern and its sub-sequences at each metrical level determined by $\subdivision_\metriclevel$. The final output will be a weighted sum of the costs by the number of sub-sequences in each level.
+Pressing's cognitive complexity model~\cite{Pressing97,Pressing93} specifies six prototype binary sequences and ranks them in terms of \emph{cognitive cost}. For example, the lowest-cost prototype is the \emph{null} prototype, containing either a rest or a single note; the \emph{filled} prototype, which has a note in every position of the sequence, e.g. $\langle 1,1,1,1 \rangle$, costs more, but in turn less than the \emph{syncopated} prototype, which has a 0 in the first (i.e.\ strongest) metrical position, e.g. $\langle 0,1,1,1 \rangle$.
+The model analyses the cost of the whole rhythm-pattern and of its sub-sequences at each metrical level determined by $\subdivision_\metriclevel$. The final output is a sum of the costs weighted by the number of sub-sequences in each level.
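+
+The decomposition into sub-sequences can be sketched as follows (illustrative only; the prototype matching and the cost values themselves are deliberately omitted here):
+\begin{minted}[frame=single,framesep=10pt]{python}
+def subsequences(seq, subdivisions):
+    # level 0 holds the whole pattern; each further
+    # level splits every part by its subdivision factor
+    levels = [[list(seq)]]
+    for factor in subdivisions:
+        parts = []
+        for part in levels[-1]:
+            n = len(part) // factor
+            parts.extend(part[i*n:(i+1)*n]
+                         for i in range(factor))
+        levels.append(parts)
+    return levels
+\end{minted}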
 
 \subsubsection{Toussaint 2002 `Metric Complexity' (\metrical)}
 \label{sec:background:models:tmc}
@@ -103,16 +121,14 @@
 \label{sec:background:models:wnbd}
 
 
-\subsubsection{Capabilities of models}
-\label{sec:background:models:capabilities}
+\begin{table}
+\renewcommand{\arraystretch}{1.2}
+\centering
 
-\begin{table}
-\centering
-\caption{Comparisons of the properties of syncopation models.}
-\label{ta:capabilites}
-\begin{tabular}{c | c c c c c c c}
-\hline
-Property & \lhl & \pressing & \metrical & \sioros & \keith & \offbeat & \wnbd \\
+{\footnotesize
+\begin{tabular}{lccccccc}
+%\hline
+Property & \lhl  & \pressing  & \metrical  & \sioros & \keith  & \offbeat  & \wnbd \\
 \hline
 Onset & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark \\
 Duration & & & & & \checkmark & & \checkmark \\
@@ -122,27 +138,20 @@
 Duple & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark \\
 Triple & \checkmark & \checkmark & \checkmark & \checkmark & & \checkmark & \checkmark \\
 \hline
-%\hline
-%Model & Basis & Onset & Dynamics & Melody & Mono & Poly & Duple & Triple \\
-%\hline
-%\lhl & H & \checkmark & & & \checkmark & & \checkmark & \checkmark\\
-%\keith & C & \checkmark & & & \checkmark & \checkmark & \checkmark & \\
-%\pressing & H,C & \checkmark & & & \checkmark & & \checkmark & \checkmark\\
-%\metrical & H & \checkmark & & & \checkmark & & \checkmark & \checkmark\\
-%\offbeat & O & \checkmark & & & \checkmark & \checkmark & \checkmark & \checkmark\\
-%\wnbd & O & \checkmark & & & \checkmark & \checkmark & \checkmark & \checkmark\\
-%\sioros & H & \checkmark & \checkmark & & \checkmark & & \checkmark & \checkmark\\
-%\ksa & A & \checkmark & \checkmark & \checkmark & \checkmark & & \checkmark & \\
-%\hline
 \end{tabular}
+}
+\caption{Musical properties captured by the different syncopation models. All models use note onsets, but only two use note duration rather than inter-onset intervals. Only SG takes dynamics (i.e. variation in note velocity) into account. All models handle monorhythms, but the four models based on hierarchical decomposition of rhythm patterns are unable to handle polyrhythmic patterns. All models can process both duple and triple meters, with the exception of KTH, which can only process duple.}
+\label{ta:capabilities}
 \end{table}
 
-To summarise the seven syncopation models, we compare their capabilities in terms of musical features they can capture in Table~\ref{ta:capabilites}. All the models use temporal features (i.e. onset time point and/or note duration) in the modelling. The SG model also process dynamic information of musical events (i.e. note velocity). We use the term \emph{monorhythm} to refer to any rhythm-pattern that is not polyrhythmic. All the models can measure syncopation of monorhythms, but only KTH, TOB and WNBD models can deal with polyrhythms. Finally, all the models can deal with rhythms (notated) in duple meter, but all models except KTH can cope with rhythms in a triple meter.
 
 
+%All the models use temporal features (i.e. onset time point and/or note duration) in the modelling. The SG model also process dynamic information of musical events (i.e. note velocity). We use the term \emph{monorhythm} to refer to any rhythm-pattern that is not polyrhythmic. All the models can measure syncopation of monorhythms, but only KTH, TOB and WNBD models can deal with polyrhythms. Finally, all the models can deal with rhythms (notated) in duple meter, but all models except KTH can cope with rhythms in a triple meter.
 
 
 
 
 
 
+
+
--- a/SMC2015latex/section/dataset.tex	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/section/dataset.tex	Mon Apr 27 16:30:13 2015 +0100
@@ -1,12 +1,13 @@
 \section{Syncopation Dataset}
 \label{sec:data}
 
-The major outcome of the SynPy toolkit is to provide prediction of the level of syncopation of a certain rhythm pattern, or none if not applicable. As a demonstration, we apply all seven syncopation models on the rhythms in the syncopation perceptual dataset in~\cite{Song14thesis, Song13}. This dataset includes 27 monorhythms in 4/4 meter, 36 monorhythms in 6/8 and 48 polyrhythms in 4/4, altogether 111 rhythm-stimuli. 
+The main output of the SynPy toolkit is a prediction of the level of syncopation of a given rhythm pattern, or none if a model is not applicable. As a demonstration, we apply all seven syncopation models to the rhythms in the syncopation perceptual dataset of~\cite{Song15thesis, Song13}. This dataset includes 27 monorhythms in 4/4 meter, 36 monorhythms in 6/8 and 48 polyrhythms in 4/4, altogether 111 rhythm-stimuli.
 
-\begin{figure}
-\centerline{\epsfig{figure=images/allmodels.pdf, width=\columnwidth}}
-\caption{\textbf{Syncopation predictions of seven models for the syncopation dataset}.}
+\begin{figure*}[t]
+\centering
+\includegraphics[width=0.85\textwidth]{images/allmodels.pdf}
+\caption{Syncopation predictions of seven models for the syncopation dataset.}
 \label{fig:modelpredictions}
-\end{figure}
+\end{figure*}
 
 Figure~\ref{fig:modelpredictions} plots the syncopation predictions of each individual model for every rhythm. It shows that the models differ in their ranges of prediction and in the scope of rhythm categories they can process (consistent with Table~\ref{ta:capabilities}).
\ No newline at end of file
--- a/SMC2015latex/section/framework.tex	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/section/framework.tex	Mon Apr 27 16:30:13 2015 +0100
@@ -2,18 +2,19 @@
 
 \begin{figure}[t]
 \centering
-\includegraphics[width=0.6\columnwidth]{images/framework.pdf}
-\caption{Module hierarchy in the synpy toolkit: The top-level module provides a simple interface for the user to test different syncopation models. Musical constructs such as bars, velocity and note sequences, notes and time signatures are defined in the `music objects' module while support for common procedures such as sequence concatenation and subdivision is provided in `basic functions'. Models and file reading components can be interchanged as required by the user.\label{fig:framework}}
+\includegraphics[width=0.95\columnwidth]{images/framework.pdf}
+\caption{Module hierarchy in the synpy toolkit: the top-level module provides a simple interface for the user to test different syncopation models. Musical constructs such as bars, velocity and note sequences, notes and time-signatures are defined in the `music objects' module; support for common procedures such as sequence concatenation and subdivision is provided in `basic functions'. Models and file reading components can be chosen as required by the user.\label{fig:framework}}
 \end{figure}
 
-The architecture of the toolkit (shown in Figure~\ref{fig:framework}) provides a simple interface to the user. Syncopation values can be calculated for each bar in a given source of rhythm data; the user specifies which model to use and supplies any special parameters that are required.  Sources of rhythm data can be a bar object or a list of bars (detailed below in section~\ref{sec:musicobjects}) or, alternatively, the name of a file containing music data. Where a model is unable to calculate a value for a given rhythm pattern, a “None” value is recorded for that bar and the indices of unmeasured bars reported in the output.  Output can optionally be saved directly to XML or JSON files. An example of usage in the Python interpreter is shown in Figure~\ref{ta:example}.
+The architecture of the toolkit is shown in Figure~\ref{fig:framework}. Syncopation values can be calculated for each bar in a given source of rhythm data, along with selected statistics over all bars; the user specifies which model to use and supplies any special parameters that are required. Sources of rhythm data can be a bar object or a list of bars (detailed below in Section~\ref{sec:musicobjects}) or, alternatively, the name of a file containing music data. Where a model is unable to calculate a value for a given rhythm pattern, a `None' value is recorded for that bar and the indices of unmeasured bars are reported in the output. If no user parameters are specified, the default parameters given in the literature for each model are used. Output can optionally be saved directly to XML or JSON files. An example of usage in the Python interpreter is shown in Figure~\ref{ta:example}.
 
 \begin{figure}
 \footnotesize{
 \begin{minted}[frame=single,framesep=10pt]{python}
 >>>from synpy import *
 >>>import synpy.PRS as model
->>>calculate_syncopation(model, "clave.rhy")
+>>>calculate_syncopation(model, "clave.rhy", 
+   outfile="clave.xml")
 {'bars_with_valid_output': [0, 1],
  'mean_syncopation_per_bar': 8.625,
  'model_name': 'PRS',
@@ -24,13 +25,13 @@
  'syncopation_by_bar': [8.625, 8.625]}
 \end{minted}
 }
-\caption{To use the toolkit, the top level \code{synpy} module is imported along with a model (in this example Pressing \cite{Pressing97}). Calling \code{calculate\_syncopation()} then gives the syncopation results as shown. 
+\caption{To use the toolkit, the top-level \code{synpy} module is imported along with a model (in this example Pressing \cite{Pressing97}). Calling \code{calculate\_syncopation()} then gives the syncopation results as shown, writing output to an XML file. Output file names and extra parameters for a model are added as optional arguments as required by the user.
 \label{ta:example} }
 \end{figure}
 
 \subsection{Music objects}
 \label{sec:musicobjects}
-The `music objects' module provides classes to represent musical constructs such as bars, velocity and note sequences, time signatures, and individual notes.  A \code{Bar} object holds the rhythm information for a single bar of music along with its associated time signature and optional tempo and ticks-per-quarternote values. \code{Bar} objects may be initialised with either a note sequence or velocity sequence and can be chained together in the form of a doubly-linked \code{BarList} allowing syncopation models to access next and previous bars where appropriate\footnote{Several models \cite{LHL84,Keith91,Pressing97,Gomez05} implemented in the toolkit require knowledge of the contents of previous and/or next bars in order to calculate the syncopation of the current bar.}. The note sequence and velocity sequence classes are direct implementations of the sequences described in section~\ref{sec:background}.    Common low-level procedures such as sequence concatenation and subdivision are provided in `basic functions'.
+The `music objects' module provides classes to represent the musical constructs described in Section~\ref{sec:background}. A \code{Bar} object holds the rhythm information for a single bar of music along with its associated time-signature and optional tempo and TPQ values (see Section~\ref{sec:background:rhythm:timespan}). \code{Bar} objects may be initialised with either a note sequence or a velocity sequence and can be chained together in the form of a doubly-linked \code{BarList}, allowing syncopation models to access the next and previous bars where appropriate\footnote{Several models \cite{LHL84,Keith91,Pressing97,Gomez05} implemented in the toolkit require knowledge of the contents of the previous and/or next bars in order to calculate the syncopation of the current bar.}. The note sequence and velocity sequence classes are direct implementations of the sequences described in Section~\ref{sec:background:rhythm:note}. Common low-level procedures such as sequence concatenation and subdivision are provided in `basic functions'.
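+
+The doubly-linked chaining might be sketched as follows (hypothetical code; the toolkit's actual class definitions differ):
+\begin{minted}[frame=single,framesep=10pt]{python}
+class Bar:
+    # minimal sketch: rhythm data plus neighbour links
+    def __init__(self, velocity_sequence,
+                 time_signature, tpq=4):
+        self.velocity_sequence = velocity_sequence
+        self.time_signature = time_signature
+        self.tpq = tpq
+        self.prev = self.next = None
+
+def link_bars(bars):
+    # chain bars so a model can reach its neighbours
+    for a, b in zip(bars, bars[1:]):
+        a.next, b.prev = b, a
+    return bars
+\end{minted}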
 
 \subsection{File Input}
 \label{sec:fileinput}
@@ -39,7 +40,7 @@
 \begin{figure}
 \footnotesize{
 \begin{minted}[frame=single,framesep=10pt]{python}
-T{4/4} # time signature
+T{4/4} # time-signature
 TPQ{4} # ticks per quarternote
 # Bar 1
 Y{(0,3,2),(3,1,1),(6,2,2),(10,2,1),(12,4,1)}
@@ -50,9 +51,9 @@
 \caption{Example rhythm annotation \code{.rhy} file containing two bars of the Son clave rhythm. The first is expressed as a note sequence with a resolution of four ticks per quarter-note; the second is the same rhythm expressed as a velocity sequence (see Section~\ref{sec:background}).}
 \label{ta:clave} 
 \end{figure}
-Our \code{.rhy} annotation format is a light text syntax for descibing rhtyhm patterns directly in terms of note and velocity sequences (see Figure~\ref{ta:clave}). The full syntax specification is given in Backus Naur Form on the toolkit page XXXURL.
+Our \code{.rhy} annotation format is a lightweight text syntax for describing rhythm patterns directly in terms of note and velocity sequences (see Figure~\ref{ta:clave}). The full syntax specification is given in Backus-Naur Form on the toolkit page \cite{Song14URL}.
 
-The MIDI file reader can open type 0 and type 1 standard MIDI files and select a given track to read rhythm from.  Notes with zero delta time between them are treated as the same event for the purposes of creating note sequences from the MIDI stream. Time signature and tempo events encoded in the MIDI stream are assumed to correctly describe those parameters of the recorded music so it is recommended that the user uses correctly annotated and quantised MIDI files.
+The MIDI file reader can open type 0 and type 1 standard MIDI files and select a given track to read rhythm from. Notes with zero delta time between them (i.e. chords) are treated as the same event for the purposes of creating note sequences from the MIDI stream. Time-signature and tempo events encoded in the MIDI stream are assumed to correctly describe those parameters of the recorded music, so it is recommended that users supply correctly annotated and quantised MIDI files.
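+
+The chord-merging behaviour can be sketched like this (a sketch assuming events arrive as (tick, pitch, velocity) tuples sorted by tick; the real reader works on the raw MIDI stream):
+\begin{minted}[frame=single,framesep=10pt]{python}
+from itertools import groupby
+
+def merge_chords(events):
+    # zero delta time = identical tick = one onset;
+    # keeping the loudest velocity is an assumption
+    return [(tick, max(v for _, _, v in group))
+            for tick, group in
+            groupby(events, key=lambda e: e[0])]
+\end{minted}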
 
 \subsection{Plugin architecture}
 The system architecture has been designed to allow new models to be added easily. Models have a common interface, exposing a single function that returns the syncopation value for a bar of music. Optional parameters may be supplied as a Python dictionary if the user wishes to specify settings different from those given in the literature for a specific model.
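+
+A skeleton of such a plugin might look as follows (all names are illustrative, not the toolkit's actual interface, and the metric is a placeholder that exists only to keep the sketch runnable):
+\begin{minted}[frame=single,framesep=10pt]{python}
+def get_syncopation(bar, parameters=None):
+    # one such function per model module: returns the
+    # bar's syncopation value, or None if unmeasurable
+    params = {"weights": [0, -1, -2, -3]}  # defaults
+    if parameters:
+        params.update(parameters)  # user-supplied dict
+    seq = getattr(bar, "velocity_sequence", None)
+    if not seq:
+        return None
+    # placeholder metric: count sounded odd-tick positions
+    return sum(1 for i, v in enumerate(seq)
+               if v > 0 and i % 2 == 1)
+\end{minted}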
--- a/SMC2015latex/section/introduction.tex	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/section/introduction.tex	Mon Apr 27 16:30:13 2015 +0100
@@ -1,11 +1,13 @@
 \section{Introduction}
 \label{sec:introduction}
 
-Syncopation is a fundamental feature of rhythm in music and a crucial aspect of musical character in many styles and cultures. Having comprehensive models to capture syncopation perception allows us to better understand the broad aspects of music perception. Over the last thirty years, several modelling approaches for syncopation have been developed and heavily used in studies in multiple disciplines~\cite{Fitch_Rosenfeld07, Smith_Honing07, Keller_Schubert11, Madison13, Witek14}. To date, formal investigations on the links between syncopation and  music perception subjects such as meter induction, emotion and groove, have largely relied on quantitative measures of syncopation [cites?]. However, until now there has not been a comprehensive reference implementation of the different algorithms available to facilitate quantifying syncopation.
+Syncopation is a fundamental feature of rhythm in music and a crucial aspect of musical character in many styles and cultures. Having comprehensive models to capture syncopation perception allows us to better understand the broader aspects of music perception. Over the last thirty years, several modelling approaches for syncopation have been developed and heavily used in studies across multiple disciplines~\cite{Fitch_Rosenfeld07, Smith_Honing07, Keller_Schubert11, Madison13, Witek14}. To date, formal investigations of the links between syncopation and music perception topics such as meter induction, emotion and groove have largely relied on quantitative measures of syncopation [cites?]. However, until now there has been no comprehensive reference implementation of the different algorithms available to facilitate quantifying syncopation.
 
-In~\cite{Song14thesis}, Song provides a consolidated mathematical framework and in-depth review of seven widely used syncopation models including: Longuet-Higgins and Lee's model (LHL)~\cite{LHL84}, Pressing's model (PRS)~\cite{Pressing97,Pressing93}, Toussaint's Metric Complexity model (TMC)~\cite{Toussaint02Metrical}, Sioros and Guedes's model (SG)~\cite{Sioros11,Sioros12}, Keith's model (KTH)~\cite{Keith91}, Toussaint's off-beatness measure (TOB)~\cite{Toussaint05Offbeatness} and G\'omez et al.'s Weighted Note-to- Beat Distance (WNBD)~\cite{Gomez05}.
+In~\cite{Song15thesis}, Song provides a consolidated mathematical framework and in-depth review of seven widely used syncopation models including: Longuet-Higgins and Lee's model (LHL)~\cite{LHL84}, Pressing's model (PRS)~\cite{Pressing97,Pressing93}, Toussaint's Metric Complexity model (TMC)~\cite{Toussaint02Metrical}, Sioros and Guedes's model (SG)~\cite{Sioros11,Sioros12}, Keith's model (KTH)~\cite{Keith91}, Toussaint's off-beatness measure (TOB)~\cite{Toussaint05Offbeatness} and G\'omez et al.'s Weighted Note-to-Beat Distance (WNBD)~\cite{Gomez05}. 
 Based on this mathematical framework, the SynPy toolkit provides implementations of these syncopation models in the Python programming language. 
 
-XXXXX Key features XXXXX. For ease of input, the SynPy toolkit is able to process standard MIDI files or text annotations of rhythm patterns in an intuitive, simple syntax. It is able to process multiple bars of music, reporting syncopation values bar by bar as well as various descriptive statistics across a whole piece. This toolkit also defines a common interface for syncopation models, which provides a simple plugin architecture for future extensibility.  
+novel features - time sig, tempo, real music file input, polyrhythm
 
-In section 2 we briefly review the seven syncopation models and introduce the mathematical representations of a few important rhythmic concepts used in the implementations. In section 3 we describe the framework for SynPy which is the main contribution of this paper. We outline the functional requirements, define the input source and describe the usage.
+XXXXX Key features XXXXX. For ease of input, the SynPy toolkit is able to process standard MIDI files or text annotations of rhythm patterns in a simple, intuitive syntax. The toolkit can process multiple bars of music, reporting syncopation values bar by bar as well as various descriptive statistics across a whole piece. It also defines a common interface for syncopation models, providing a simple plugin architecture for future extensibility.
+
+In Section~\ref{sec:background} we introduce mathematical representations of a few key rhythmic concepts that form the basis of the toolkit, then briefly review the seven syncopation models that have been implemented. In Section~\ref{sec:framework} we outline the functional requirements and architecture of SynPy, describing input sources, options and usage.
--- a/SMC2015latex/syncopation_toolkit.tex	Mon Apr 27 13:10:10 2015 +0100
+++ b/SMC2015latex/syncopation_toolkit.tex	Mon Apr 27 16:30:13 2015 +0100
@@ -145,7 +145,7 @@
 
 \input{section/introduction}
 
-%\input{section/background}
+\input{section/background}
 
 \input{section/framework}