\documentclass[conference]{IEEEtran}
\usepackage{fixltx2e}
\usepackage{cite}
\usepackage[cmex10]{amsmath}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{epstopdf}
\usepackage{url}
\usepackage{listings}
%\usepackage[expectangle]{tools}
\usepackage{tools}
\usepackage{tikz}
\usetikzlibrary{calc}
\usetikzlibrary{matrix}
\usetikzlibrary{patterns}
\usetikzlibrary{arrows}

\let\citep=\cite
\newcommand{\colfig}[2][1]{\includegraphics[width=#1\linewidth]{figs/#2}}%
\newcommand\preals{\reals_+}
\newcommand\X{\mathcal{X}}
\newcommand\Y{\mathcal{Y}}
\newcommand\domS{\mathcal{S}}
\newcommand\A{\mathcal{A}}
\newcommand\Data{\mathcal{D}}
\newcommand\rvm[1]{\mathrm{#1}}
\newcommand\sps{\,.\,}
\newcommand\Ipred{\mathcal{I}_{\mathrm{pred}}}
\newcommand\Ix{\mathcal{I}}
\newcommand\IXZ{\overline{\underline{\mathcal{I}}}}
\newcommand\x{\vec{x}}
\newcommand\Ham[1]{\mathcal{H}_{#1}}
\newcommand\subsets[2]{[#1]^{(k)}}
\def\bet(#1,#2){#1..#2}


\def\ev(#1=#2){#1\!\!=\!#2}
\newcommand\rv[1]{\Omega \to #1}
\newcommand\ceq{\!\!=\!}
\newcommand\cmin{\!-\!}
\newcommand\modulo[2]{#1\!\!\!\!\!\mod#2}

\newcommand\sumitoN{\sum_{i=1}^N}
\newcommand\sumktoK{\sum_{k=1}^K}
\newcommand\sumjtoK{\sum_{j=1}^K}
\newcommand\sumalpha{\sum_{\alpha\in\A}}
\newcommand\prodktoK{\prod_{k=1}^K}
\newcommand\prodjtoK{\prod_{j=1}^K}

\newcommand\past[1]{\overset{\rule{0pt}{0.2em}\smash{\leftarrow}}{#1}}
\newcommand\fut[1]{\overset{\rule{0pt}{0.1em}\smash{\rightarrow}}{#1}}
\newcommand\parity[2]{P^{#1}_{2,#2}}

%\usepackage[parfill]{parskip}
\begin{document}
\title{Cognitive Music Modelling: an\\Information Dynamics Approach}

\author{
\IEEEauthorblockN{Samer A. Abdallah, Henrik Ekeus, Peter Foster,}
\IEEEauthorblockN{Andrew Robertson and Mark D. Plumbley}
\IEEEauthorblockA{Centre for Digital Music\\
Queen Mary University of London\\
Mile End Road, London E1 4NS}}

\maketitle
\begin{abstract}
We describe an information-theoretic approach to the analysis
of music and other sequential data, which emphasises the predictive aspects
of perception, and the dynamic process
of forming and modifying expectations about an unfolding stream of data,
characterising these using the tools of information theory: entropies,
mutual informations, and related quantities.
After reviewing the theoretical foundations,
% we present a new result on predictive information rates in high-order Markov chains, and
we discuss a few emerging areas of application, including
musicological analysis, real-time beat-tracking analysis, and the generation
of musical materials as a cognitively-informed compositional aid.
\end{abstract}


\section{Introduction}
\label{s:Intro}
The relationship between
Shannon's \cite{Shannon48} information theory and music and art in general has been the
subject of some interest since the 1950s
\cite{Youngblood58,CoonsKraehenbuehl1958,Moles66,Meyer67,Cohen1962}.
The general thesis is that perceptible qualities and subjective states
like uncertainty, surprise, complexity, tension, and interestingness
are closely related to information-theoretic quantities like
entropy, relative entropy, and mutual information.

Music is also an inherently dynamic process,
where listeners build up expectations about what is to happen next,
which may be fulfilled
immediately, after some delay, or modified as the music unfolds.
In this paper, we explore this ``Information Dynamics'' view of music,
discussing the theory behind it and some emerging applications.

\subsection{Expectation and surprise in music}
The idea that the musical experience is strongly shaped by the generation
and playing out of strong and weak expectations was put forward by, amongst others,
music theorists L. B. Meyer \cite{Meyer67} and Narmour \citep{Narmour77}, but was
recognised much earlier; for example,
it was elegantly put by Hanslick \cite{Hanslick1854} in the
nineteenth century:
\begin{quote}
`The most important factor in the mental process which accompanies the
act of listening to music, and which converts it to a source of pleasure,
is \ldots the intellectual satisfaction
which the listener derives from continually following and anticipating
the composer's intentions---now, to see his expectations fulfilled, and
now, to find himself agreeably mistaken.'
%It is a matter of course that
%this intellectual flux and reflux, this perpetual giving and receiving
%takes place unconsciously, and with the rapidity of lightning-flashes.
\end{quote}
An essential aspect of this is that music is experienced as a phenomenon
that unfolds in time, rather than being apprehended as a static object
presented in its entirety. Meyer argued that the experience depends
on how we change and revise our conceptions \emph{as events happen}, on
how expectation and prediction interact with occurrence, and that, to a
large degree, the way to understand the effect of music is to focus on
this `kinetics' of expectation and surprise.

Prediction and expectation are essentially probabilistic concepts
and can be treated mathematically using probability theory.
We suppose that when we listen to music, expectations are created on the basis
of our familiarity with various styles of music and our ability to
detect and learn statistical regularities in the music as they emerge.
There is experimental evidence that human listeners are able to internalise
statistical knowledge about musical structure, \eg
% \citep{SaffranJohnsonAslin1999,EerolaToiviainenKrumhansl2002}, and also
\citep{SaffranJohnsonAslin1999}, and also
that statistical models can form an effective basis for computational
analysis of music, \eg
\cite{ConklinWitten95,PonsfordWigginsMellish1999,Pearce2005}.

% \subsection{Music and information theory}
With a probabilistic framework for music modelling and prediction in hand,
we can %are in a position to
compute various
\comment{
which provides us with a number of measures, such as entropy
and mutual information, which are suitable for quantifying states of
uncertainty and surprise, and thus could potentially enable us to build
quantitative models of the listening process described above. They are
what Berlyne \cite{Berlyne71} called `collative variables' since they are
to do with patterns of occurrence rather than medium-specific details.
Berlyne sought to show that the collative variables are closely related to
perceptual qualities like complexity, tension, interestingness,
and even aesthetic value, not just in music, but in other temporal
or visual media.
The relevance of information theory to music and art has
also been addressed by researchers from the 1950s onwards
\cite{Youngblood58,CoonsKraehenbuehl1958,Cohen1962,HillerBean66,Moles66,Meyer67}.
}
information-theoretic quantities like entropy, relative entropy,
and mutual information.
% and are major determinants of the overall experience.
Berlyne \cite{Berlyne71} called such quantities `collative variables', since
they are to do with patterns of occurrence rather than medium-specific details,
and developed the ideas of `information aesthetics' in an experimental setting.
% Berlyne's `new experimental aesthetics', the `information-aestheticians'.

% Listeners then experience greater or lesser levels of surprise
% in response to departures from these norms.
% By careful manipulation
% of the material, the composer can thus define, and induce within the
% listener, a temporal programme of varying
% levels of uncertainty, ambiguity and surprise.


\subsection{Information dynamic approach}
Our working hypothesis is that, as an intelligent, predictive
agent (to which we will refer as `it') listens to a piece of music, it maintains
a dynamically evolving probabilistic belief state that enables it to make predictions
about how the piece will continue, relying on both its previous experience
of music and the emerging themes of the piece. As events unfold, it revises
this belief state, which includes predictive
distributions over possible future events. These
% distributions and changes in distributions
can be characterised in terms of a handful of information-theoretic
measures such as entropy and relative entropy. By tracing the
evolution of these measures, we obtain a representation which captures much
of the significant structure of the music.

One consequence of this approach is that, regardless of the details of
the sensory input or even which sensory modality is being processed, the resulting
analysis is in terms of the same units: quantities of information (bits) and
rates of information flow (bits per second). The information-theoretic
concepts in terms of which the analysis is framed are universal to all sorts
of data.
In addition, when adaptive probabilistic models are used, expectations are
created mainly in response to \emph{patterns} of occurrence,
rather than the details of which specific things occur.
Together, these suggest that an information dynamic analysis captures a
high level of \emph{abstraction}, and could be used to
make structural comparisons between different temporal media,
such as music, film, animation, and dance.
% analyse and compare information
% flow in different temporal media regardless of whether they are auditory,
% visual or otherwise.

Another consequence is that the information dynamic approach gives us a principled way
to address the notion of \emph{subjectivity}, since the analysis is dependent on the
probability model the observer starts off with, which may depend on prior experience
or other factors, and which may change over time. Thus, inter-subject variability and
variation in subjects' responses over time are
fundamental to the theory.

%modelling the creative process, which often alternates between generative
%and selective or evaluative phases \cite{Boden1990}, and would have
%applications in tools for computer aided composition.


\section{Theoretical review}

\subsection{Entropy and information}
\label{s:entro-info}

Let $X$ denote some variable whose value is initially unknown to our
hypothetical observer. We will treat $X$ mathematically as a random variable,
with a value to be drawn from some set $\X$ and a
probability distribution representing the observer's beliefs about the
true value of $X$.
In this case, the observer's uncertainty about $X$ can be quantified
as the entropy of the random variable, $H(X)$. For a discrete variable
with probability mass function $p:\X \to [0,1]$, this is
\begin{equation}
H(X) = \sum_{x\in\X} -p(x) \log p(x). % = \expect{-\log p(X)},
\end{equation}
% where $\expect{}$ is the expectation operator.
The negative log-probability
$\ell(x) = -\log p(x)$ of a particular value $x$ can usefully be thought of as
the \emph{surprisingness} of the value $x$ should it be observed, and
hence the entropy is the expectation of the surprisingness, $\expect \ell(X)$.

Now suppose that the observer receives some new data $\Data$ that
causes a revision of its beliefs about $X$. The \emph{information}
in this new data \emph{about} $X$ can be quantified as the
relative entropy or
Kullback-Leibler (KL) divergence between the prior and posterior
distributions $p(x)$ and $p(x|\Data)$ respectively:
\begin{equation}
\mathcal{I}_{\Data\to X} = D(p_{X|\Data} || p_{X})
= \sum_{x\in\X} p(x|\Data) \log \frac{p(x|\Data)}{p(x)}.
\label{eq:info}
\end{equation}
When there are multiple variables $X_1, X_2$
\etc which the observer believes to be dependent, then the observation of
one may change its beliefs and hence yield information about the
others. The joint and conditional entropies as described in any
textbook on information theory (\eg \cite{CoverThomas}) then quantify
the observer's expected uncertainty about groups of variables given the
values of others. In particular, the \emph{mutual information}
$I(X_1;X_2)$ is both the expected information
in an observation of $X_2$ about $X_1$ and the expected reduction
in uncertainty about $X_1$ after observing $X_2$:
\begin{equation}
I(X_1;X_2) = H(X_1) - H(X_1|X_2),
\end{equation}
where $H(X_1|X_2) = H(X_1,X_2) - H(X_2)$ is the conditional entropy
of $X_1$ given $X_2$. A little algebra shows that $I(X_1;X_2)=I(X_2;X_1)$,
and so the mutual information is symmetric in its arguments. A conditional
form of the mutual information can be formulated analogously:
\begin{equation}
I(X_1;X_2|X_3) = H(X_1|X_3) - H(X_1|X_2,X_3).
\end{equation}
These relationships between the various entropies and mutual
informations are conveniently visualised in \emph{information diagrams}
or I-diagrams \cite{Yeung1991} such as the one in \figrf{venn-example}.
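As a concrete illustration of these definitions, the following Python sketch
(a minimal implementation for discrete distributions using NumPy; the helper
names are ours, not part of any published system) computes the entropy, the
information \eqrf{info}, and the mutual information:
\begin{lstlisting}[language=Python]
import numpy as np

def entropy(p):
    # H(X) in bits for a distribution given as a 1-D array summing to 1;
    # terms with p(x) = 0 contribute nothing to the sum.
    p = p[p > 0]
    return float(-np.sum(p * np.log2(p)))

def information(post, prior):
    # KL divergence D(post || prior): the information in data that moves
    # the observer's beliefs from prior to post.
    m = post > 0
    return float(np.sum(post[m] * np.log2(post[m] / prior[m])))

def mutual_information(pxy):
    # I(X1;X2) = H(X1) + H(X2) - H(X1,X2), from a joint probability table.
    px, py = pxy.sum(axis=1), pxy.sum(axis=0)
    return entropy(px) + entropy(py) - entropy(pxy.ravel())
\end{lstlisting}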

\begin{fig}{venn-example}
\newcommand\rad{2.2em}%
\newcommand\circo{circle (3.4em)}%
\newcommand\labrad{4.3em}
\newcommand\bound{(-6em,-5em) rectangle (6em,6em)}
\newcommand\colsep{\ }
\newcommand\clipin[1]{\clip (#1) \circo;}%
\newcommand\clipout[1]{\clip \bound (#1) \circo;}%
\newcommand\cliptwo[3]{%
\begin{scope}
\clipin{#1};
\clipin{#2};
\clipout{#3};
\fill[black!30] \bound;
\end{scope}
}%
\newcommand\clipone[3]{%
\begin{scope}
\clipin{#1};
\clipout{#2};
\clipout{#3};
\fill[black!15] \bound;
\end{scope}
}%
\begin{tabular}{c@{\colsep}c}
\begin{tikzpicture}[baseline=0pt]
\coordinate (p1) at (90:\rad);
\coordinate (p2) at (210:\rad);
\coordinate (p3) at (-30:\rad);
\clipone{p1}{p2}{p3};
\clipone{p2}{p3}{p1};
\clipone{p3}{p1}{p2};
\cliptwo{p1}{p2}{p3};
\cliptwo{p2}{p3}{p1};
\cliptwo{p3}{p1}{p2};
\begin{scope}
\clip (p1) \circo;
\clip (p2) \circo;
\clip (p3) \circo;
\fill[black!45] \bound;
\end{scope}
\draw (p1) \circo;
\draw (p2) \circo;
\draw (p3) \circo;
\path
(barycentric cs:p3=1,p1=-0.2,p2=-0.1) +(0ex,0) node {$I_{3|12}$}
(barycentric cs:p1=1,p2=-0.2,p3=-0.1) +(0ex,0) node {$I_{1|23}$}
(barycentric cs:p2=1,p3=-0.2,p1=-0.1) +(0ex,0) node {$I_{2|13}$}
(barycentric cs:p3=1,p2=1,p1=-0.55) +(0ex,0) node {$I_{23|1}$}
(barycentric cs:p1=1,p3=1,p2=-0.55) +(0ex,0) node {$I_{13|2}$}
(barycentric cs:p2=1,p1=1,p3=-0.55) +(0ex,0) node {$I_{12|3}$}
(barycentric cs:p3=1,p2=1,p1=1) node {$I_{123}$}
;
\path
(p1) +(140:\labrad) node {$X_1$}
(p2) +(-140:\labrad) node {$X_2$}
(p3) +(-40:\labrad) node {$X_3$};
\end{tikzpicture}
&
\parbox{0.5\linewidth}{
\small
\begin{align*}
I_{1|23} &= H(X_1|X_2,X_3) \\
I_{13|2} &= I(X_1;X_3|X_2) \\
I_{1|23} + I_{13|2} &= H(X_1|X_2) \\
I_{12|3} + I_{123} &= I(X_1;X_2)
\end{align*}
}
\end{tabular}
\caption{
I-diagram of entropies and mutual informations
for three random variables $X_1$, $X_2$ and $X_3$. The areas of
the three circles represent $H(X_1)$, $H(X_2)$ and $H(X_3)$ respectively.
The total shaded area is the joint entropy $H(X_1,X_2,X_3)$.
The central area $I_{123}$ is the co-information \cite{McGill1954}.
Some other information measures are indicated in the legend.
}
\end{fig}


\subsection{Surprise and information in sequences}
\label{s:surprise-info-seq}

Suppose that $(\ldots,X_{-1},X_0,X_1,\ldots)$ is a sequence of
random variables, infinite in both directions,
and that $\mu$ is the associated probability measure over all
realisations of the sequence. In the following, $\mu$ will simply serve
as a label for the process. We can identify a number of information-theoretic
measures meaningful in the context of a sequential observation of the sequence, during
which, at any time $t$, the sequence can be divided into a `present' $X_t$, a `past'
$\past{X}_t \equiv (\ldots, X_{t-2}, X_{t-1})$, and a `future'
$\fut{X}_t \equiv (X_{t+1},X_{t+2},\ldots)$.
We will write the actually observed value of $X_t$ as $x_t$, and
the sequence of observations up to but not including $x_t$ as
$\past{x}_t$.
% Since the sequence is assumed stationary, we can without loss of generality,
% assume that $t=0$ in the following definitions.

The in-context surprisingness of the observation $X_t=x_t$ depends on
both $x_t$ and the context $\past{x}_t$:
\begin{equation}
\ell_t = - \log p(x_t|\past{x}_t).
\end{equation}
However, before $X_t$ is observed, the observer can compute
the \emph{expected} surprisingness as a measure of its uncertainty about
$X_t$; this may be written as an entropy
$H(X_t|\ev(\past{X}_t = \past{x}_t))$, but note that this is
conditional on the \emph{event} $\ev(\past{X}_t=\past{x}_t)$, not the
\emph{variables} $\past{X}_t$ as in the conventional conditional entropy.

The surprisingness $\ell_t$ and expected surprisingness
$H(X_t|\ev(\past{X}_t=\past{x}_t))$
can be understood as \emph{subjective} information dynamic measures, since they are
based on the observer's probability model in the context of the actually observed sequence
$\past{x}_t$. They characterise what it is like to be `in the observer's shoes'.
If we view the observer as a purely passive or reactive agent, this would
probably be sufficient, but for active agents such as humans or animals, it is
often necessary to \emph{anticipate} future events in order, for example, to plan the
most effective course of action. It makes sense for such observers to be
concerned about the predictive probability distribution over future events,
$p(\fut{x}_t|\past{x}_t)$. When an observation $\ev(X_t=x_t)$ is made in this context,
the \emph{instantaneous predictive information} (IPI) $\mathcal{I}_t$ at time $t$
is the information in the event $\ev(X_t=x_t)$ about the entire future of the sequence $\fut{X}_t$,
\emph{given} the observed past $\past{X}_t=\past{x}_t$.
Referring to the definition of information \eqrf{info}, this is the KL divergence
between prior and posterior distributions over possible futures, which, written out in full, is
\begin{equation}
\mathcal{I}_t = \sum_{\fut{x}_t \in \X^*}
p(\fut{x}_t|x_t,\past{x}_t) \log \frac{ p(\fut{x}_t|x_t,\past{x}_t) }{ p(\fut{x}_t|\past{x}_t) },
\end{equation}
where the sum is to be taken over the set of infinite sequences $\X^*$.
Note that it is quite possible for an event to be surprising but not informative
in a predictive sense.
As with the surprisingness, the observer can compute its \emph{expected} IPI
at time $t$, which reduces to a mutual information $I(X_t;\fut{X}_t|\ev(\past{X}_t=\past{x}_t))$
conditioned on the observed past. This could be used, for example, as an estimate
of the attentional resources which should be directed at this stream of data, which may
be in competition with other sensory streams.
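To make the sequential picture concrete, the following sketch (again with
hypothetical helper names) tracks the surprisingness $\ell_t$ and the
observer's expected surprisingness along an observed sequence, assuming a
fixed first order Markov model with a column-stochastic transition matrix
as in \secrf{markov}:
\begin{lstlisting}[language=Python]
import numpy as np

def track_surprise(seq, a, p0):
    # seq: observed symbols (integers); a[i, j] = P(X_t = i | X_t-1 = j);
    # p0: the observer's marginal distribution for the first symbol.
    ell, expected = [], []
    pred = p0                  # predictive distribution before seeing x_t
    for x in seq:
        q = pred[pred > 0]
        expected.append(float(-np.sum(q * np.log2(q))))  # H(X_t | past event)
        ell.append(float(-np.log2(pred[x])))             # surprisingness l_t
        pred = a[:, x]         # predictive distribution for the next step
    return np.array(ell), np.array(expected)
\end{lstlisting}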

\subsection{Information measures for stationary random processes}
\label{s:process-info}


\begin{fig}{predinfo-bg}
\newcommand\subfig[2]{\shortstack{#2\\[0.75em]#1}}
\newcommand\rad{1.8em}%
\newcommand\ovoid[1]{%
++(-#1,\rad)
-- ++(2 * #1,0em) arc (90:-90:\rad)
-- ++(-2 * #1,0em) arc (270:90:\rad)
}%
\newcommand\axis{2.75em}%
\newcommand\olap{0.85em}%
\newcommand\offs{3.6em}
\newcommand\colsep{\hspace{5em}}
\newcommand\longblob{\ovoid{\axis}}
\newcommand\shortblob{\ovoid{1.75em}}
\begin{tabular}{c}
\subfig{(a) multi-information and entropy rates}{%
\begin{tikzpicture}%[baseline=-1em]
\newcommand\rc{1.75em}
\newcommand\throw{2.5em}
\coordinate (p1) at (180:1.5em);
\coordinate (p2) at (0:0.3em);
\newcommand\bound{(-7em,-2.6em) rectangle (7em,3.0em)}
\newcommand\present{(p2) circle (\rc)}
\newcommand\thepast{(p1) ++(-\throw,0) \ovoid{\throw}}
\newcommand\fillclipped[2]{%
\begin{scope}[even odd rule]
\foreach \thing in {#2} {\clip \thing;}
\fill[black!#1] \bound;
\end{scope}%
}%
\fillclipped{30}{\present,\bound \thepast}
\fillclipped{15}{\present,\bound \thepast}
\fillclipped{45}{\present,\thepast}
\draw \thepast;
\draw \present;
\node at (barycentric cs:p2=1,p1=-0.3) {$h_\mu$};
\node at (barycentric cs:p2=1,p1=1) [shape=rectangle,fill=black!45,inner sep=1pt]{$\rho_\mu$};
\path (p2) +(90:3em) node {$X_0$};
\path (p1) +(-3em,0em) node {\shortstack{infinite\\past}};
\path (p1) +(-4em,\rad) node [anchor=south] {$\ldots,X_{-1}$};
\end{tikzpicture}}%
\\[1em]
\subfig{(b) excess entropy}{%
\newcommand\blob{\longblob}
\begin{tikzpicture}
\coordinate (p1) at (-\offs,0em);
\coordinate (p2) at (\offs,0em);
\begin{scope}
\clip (p1) \blob;
\clip (p2) \blob;
\fill[lightgray] (-1,-1) rectangle (1,1);
\end{scope}
\draw (p1) +(-0.5em,0em) node{\shortstack{infinite\\past}} \blob;
\draw (p2) +(0.5em,0em) node{\shortstack{infinite\\future}} \blob;
\path (0,0) node (future) {$E$};
\path (p1) +(-2em,\rad) node [anchor=south] {$\ldots,X_{-1}$};
\path (p2) +(2em,\rad) node [anchor=south] {$X_0,\ldots$};
\end{tikzpicture}%
}%
\\[1em]
\subfig{(c) predictive information rate $b_\mu$}{%
\begin{tikzpicture}%[baseline=-1em]
\newcommand\rc{2.1em}
\newcommand\throw{2.5em}
\coordinate (p1) at (210:1.5em);
\coordinate (p2) at (90:0.7em);
\coordinate (p3) at (-30:1.5em);
\newcommand\bound{(-7em,-2.6em) rectangle (7em,3.0em)}
\newcommand\present{(p2) circle (\rc)}
\newcommand\thepast{(p1) ++(-\throw,0) \ovoid{\throw}}
\newcommand\future{(p3) ++(\throw,0) \ovoid{\throw}}
\newcommand\fillclipped[2]{%
\begin{scope}[even odd rule]
\foreach \thing in {#2} {\clip \thing;}
\fill[black!#1] \bound;
\end{scope}%
}%
\fillclipped{80}{\future,\thepast}
\fillclipped{30}{\present,\future,\bound \thepast}
\fillclipped{15}{\present,\bound \future,\bound \thepast}
\draw \future;
\fillclipped{45}{\present,\thepast}
\draw \thepast;
\draw \present;
\node at (barycentric cs:p2=1,p1=-0.17,p3=-0.17) {$r_\mu$};
\node at (barycentric cs:p1=-0.4,p2=1.0,p3=1) {$b_\mu$};
\node at (barycentric cs:p3=0,p2=1,p1=1.2) [shape=rectangle,fill=black!45,inner sep=1pt]{$\rho_\mu$};
\path (p2) +(140:3em) node {$X_0$};
% \node at (barycentric cs:p3=0,p2=1,p1=1) {$\rho_\mu$};
\path (p3) +(3em,0em) node {\shortstack{infinite\\future}};
\path (p1) +(-3em,0em) node {\shortstack{infinite\\past}};
\path (p1) +(-4em,\rad) node [anchor=south] {$\ldots,X_{-1}$};
\path (p3) +(4em,\rad) node [anchor=south] {$X_1,\ldots$};
\end{tikzpicture}}%
\\[0.25em]
\end{tabular}
\caption{
I-diagrams for several information measures in
stationary random processes. Each circle or oval represents a random
variable or sequence of random variables relative to time $t=0$. Overlapped areas
correspond to various mutual informations.
In (a) and (c), the circle represents the `present'. Its total area is
$H(X_0)=\rho_\mu+r_\mu+b_\mu$, where $\rho_\mu$ is the multi-information
rate, $r_\mu$ is the residual entropy rate, and $b_\mu$ is the predictive
information rate. The entropy rate is $h_\mu = r_\mu+b_\mu$. The small dark
region below $X_0$ in (c) is $\sigma_\mu = E-\rho_\mu$.
}
\end{fig}

If we step back, out of the observer's shoes as it were, and consider the
random process $(\ldots,X_{-1},X_0,X_1,\ldots)$ as a statistical ensemble of
possible realisations, and furthermore assume that it is stationary,
then it becomes possible to define a number of information-theoretic measures,
closely related to those described above, but which characterise the
process as a whole, rather than on a moment-by-moment basis. Some of these,
such as the entropy rate, are well known, but others have only recently begun
to be investigated. (In the following, the assumption of stationarity means that
the measures defined below are independent of $t$.)

The \emph{entropy rate} of the process is the entropy of the `present'
$X_t$ given the `past':
\begin{equation}
\label{eq:entro-rate}
h_\mu = H(X_t|\past{X}_t).
\end{equation}
The entropy rate is a measure of the overall surprisingness
or unpredictability of the process, and gives an indication of the average
level of surprise and uncertainty that would be experienced by an observer
computing the measures of \secrf{surprise-info-seq} on a sequence sampled
from the process.

The \emph{multi-information rate} $\rho_\mu$ (following Dubnov's \cite{Dubnov2006}
notation for what he called the `information rate') is the mutual
information between the `past' and the `present':
\begin{equation}
\label{eq:multi-info}
\rho_\mu = I(\past{X}_t;X_t) = H(X_t) - h_\mu.
\end{equation}
It is a measure of how much the preceding context of an observation
helps in predicting or reducing the surprisingness of the current observation.

The \emph{excess entropy} \cite{CrutchfieldPackard1983}
is the mutual information between
the entire `past' and the entire `future':
\begin{equation}
E = I(\past{X}_t; X_t,\fut{X}_t).
\end{equation}
Both the excess entropy and the multi-information rate can be thought
of as measures of \emph{redundancy}, quantifying the extent to which
the same information is to be found in all parts of the sequence.


The \emph{predictive information rate} (or PIR) \cite{AbdallahPlumbley2009}
is the mutual information between the `present' and the `future' given the
`past':
\begin{equation}
\label{eq:PIR}
b_\mu = I(X_t;\fut{X}_t|\past{X}_t) = H(\fut{X}_t|\past{X}_t) - H(\fut{X}_t|X_t,\past{X}_t),
\end{equation}
which can be read as the average reduction
in uncertainty about the future on learning $X_t$, given the past.
Due to the symmetry of the mutual information, it can also be written
as
\begin{equation}
% \IXZ_t
b_\mu = H(X_t|\past{X}_t) - H(X_t|\past{X}_t,\fut{X}_t) = h_\mu - r_\mu,
% \label{<++>}
\end{equation}
% If $X$ is stationary, then
where $r_\mu = H(X_t|\fut{X}_t,\past{X}_t)$
is the \emph{residual} \cite{AbdallahPlumbley2010},
or \emph{erasure} \cite{VerduWeissman2006}, entropy rate.
These relationships are illustrated in \Figrf{predinfo-bg}, along with
several of the information measures we have discussed so far.
The PIR gives an indication of the average IPI that would be experienced
by an observer processing a sequence sampled from this process.


James et al.\ \cite{JamesEllisonCrutchfield2011} review several of these
information measures and introduce some new related ones.
In particular, they identify $\sigma_\mu = I(\past{X}_t;\fut{X}_t|X_t)$,
the mutual information between the past and the future given the present,
as an interesting quantity that measures the predictive benefit of
model-building, that is, of maintaining an internal state summarising past
observations in order to make better predictions. It is shown as the
small dark region below the circle in \figrf{predinfo-bg}(c).
By comparing with \figrf{predinfo-bg}(b), we can see that
$\sigma_\mu = E - \rho_\mu$.
% They also identify
% $w_\mu = \rho_\mu + b_{\mu}$, which they call the \emph{local exogenous
% information} rate.


\subsection{First and higher order Markov chains}
\label{s:markov}
% First order Markov chains are the simplest non-trivial models to which information
% dynamics methods can be applied.
In \cite{AbdallahPlumbley2009} we derived
expressions for all the information measures described in \secrf{surprise-info-seq} for
ergodic first order Markov chains (\ie those with a unique stationary
distribution).
% The derivation is greatly simplified by the dependency structure
% of the Markov chain: for the purpose of the analysis, the `past' and `future'
% segments $\past{X}_t$ and $\fut{X}_t$ can be collapsed to just the previous
% and next variables $X_{t-1}$ and $X_{t+1}$ respectively.
We also showed that
the PIR can be expressed simply in terms of entropy rates:
if we let $a$ denote the $K\times K$ transition matrix of a Markov chain over
an alphabet of $\{1,\ldots,K\}$, such that
$a_{ij} = \Pr(\ev(X_t=i)|\ev(X_{t-1}=j))$, and let $h:\reals^{K\times K}\to \reals$ be
the entropy rate function such that $h(a)$ is the entropy rate of a Markov chain
with transition matrix $a$, then the PIR is
\begin{equation}
b_\mu = h(a^2) - h(a),
\end{equation}
where $a^2$, the transition matrix squared, is the transition matrix
of the `skip one' Markov chain obtained by jumping two steps at a time
along the original chain.
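As an illustrative sketch (our own helper names, not a published
implementation), the entropy rate function $h$ and the PIR can be computed
numerically from a column-stochastic transition matrix as defined above:
\begin{lstlisting}[language=Python]
import numpy as np

def stationary(a):
    # Stationary distribution pi with a @ pi = pi, for a column-stochastic
    # transition matrix a (columns sum to 1).
    w, v = np.linalg.eig(a)
    pi = np.real(v[:, np.argmin(np.abs(w - 1))])
    return pi / pi.sum()

def h(a):
    # Entropy rate in bits: the stationary average of the entropy of each
    # column of a, i.e. -sum_j pi_j sum_i a_ij log a_ij.
    pi = stationary(a)
    logs = np.log2(np.where(a > 0, a, 1.0))   # log a_ij, with 0 log 0 = 0
    return float(-np.sum(pi * np.sum(a * logs, axis=0)))

def pir(a):
    # Predictive information rate b_mu = h(a^2) - h(a).
    return h(a @ a) - h(a)
\end{lstlisting}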

Second and higher order Markov chains can be treated in a similar way by transforming
to a first order representation of the high order Markov chain. With
an $N$th order model, this is done by forming a new alphabet of size $K^N$
consisting of all possible $N$-tuples of symbols from the base alphabet.
An observation $\hat{x}_t$ in this new model encodes a block of $N$ observations
$(x_{t+1},\ldots,x_{t+N})$ from the base model.
% The next
% observation $\hat{x}_{t+1}$ encodes the block of $N$ obtained by shifting the previous
% block along by one step.
The new Markov chain is parameterised by a sparse $K^N\times K^N$
transition matrix $\hat{a}$, in terms of which the entropy rate and PIR are
\begin{equation}
h_\mu = h(\hat{a}), \qquad b_\mu = h({\hat{a}^{N+1}}) - N h({\hat{a}}),
\end{equation}
where $\hat{a}^{N+1}$ is the $(N+1)$th power of the first order transition matrix.
Other information measures can also be computed for the high-order Markov chain, including
the multi-information rate $\rho_\mu$ and the excess entropy $E$. (These are identical
for first order Markov chains, but for order $N$ chains, $E$ can be up to $N$ times larger
than $\rho_\mu$.)
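A sketch of this embedding (assuming a hypothetical table \texttt{cond}
mapping each length-$N$ context tuple to a distribution over the next symbol,
and reusing \texttt{h} from the previous listing):
\begin{lstlisting}[language=Python]
import itertools
import numpy as np

def first_order_embedding(cond, K, N):
    # Build the sparse K^N x K^N column-stochastic matrix a_hat for an
    # order-N chain over {0,...,K-1}: each state is an N-tuple of symbols,
    # and observing symbol x shifts the context window along by one.
    states = list(itertools.product(range(K), repeat=N))
    index = {s: i for i, s in enumerate(states)}
    a_hat = np.zeros((K**N, K**N))
    for s in states:
        for x in range(K):
            a_hat[index[s[1:] + (x,)], index[s]] = cond[s][x]
    return a_hat

# Then, using h() from the previous listing:
# b_mu = h(np.linalg.matrix_power(a_hat, N + 1)) - N * h(a_hat)
\end{lstlisting}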

In our experiments with visualising and sonifying sequences sampled from
first order Markov chains \cite{AbdallahPlumbley2009}, we found that
the measures $h_\mu$, $\rho_\mu$ and $b_\mu$ correspond to perceptible
characteristics, and that the transition matrices maximising or minimising
each of these quantities are quite distinct. High entropy rates are associated
with completely uncorrelated sequences with no recognisable temporal structure
(and low $\rho_\mu$ and $b_\mu$).
High values of $\rho_\mu$ are associated with long periodic cycles (and low $h_\mu$
and $b_\mu$). High values of $b_\mu$ are associated with intermediate values
of $\rho_\mu$ and $h_\mu$, and recognisable, but not completely predictable,
temporal structures. These relationships are visible in \figrf{mtriscat} in
\secrf{composition}, where we pick up this thread again with an application of
information dynamics as a compositional aid.


\section{Information Dynamics in Analysis}

\subsection{Musicological Analysis}
\label{s:minimusic}

\begin{fig}{twopages}
\colfig[0.96]{matbase/fig9471}\\ % update from mbc paper
% \colfig[0.97]{matbase/fig72663}\\ % later update from mbc paper (Keith's new picks)
\vspace*{0.5em}
\colfig[0.97]{matbase/fig13377} % rule based analysis
\caption{Analysis of \emph{Two Pages}.
The thick vertical lines are the part boundaries as indicated in
the score by the composer.
The thin grey lines
indicate changes in the melodic `figures' of which the piece is
constructed. In the `model information rate' panel, the black asterisks
mark the six most surprising moments selected by Keith Potter.
The bottom two panels show two rule-based boundary strength analyses.
All information measures are in nats.
Note that the boundary marked in the score at around note 5,400 is known to be
anomalous; on the basis of a listening analysis, some musicologists have
placed the boundary a few bars later, in agreement with our analysis
\cite{PotterEtAl2007}.
}
\end{fig}

In \cite{AbdallahPlumbley2009}, we analysed two pieces of music in the minimalist style
by Philip Glass: \emph{Two Pages} (1969) and \emph{Gradus} (1968).
The analysis was done using a first-order Markov chain model, with the
enhancement that the transition matrix of the model was allowed to
evolve dynamically as the notes were processed, and was tracked (in
a Bayesian way) as a \emph{distribution} over possible transition matrices,
rather than a point estimate. Some results are summarised in \figrf{twopages}:
the upper four plots show the dynamically evolving subjective information
measures as described in \secrf{surprise-info-seq}, computed using a point
estimate of the current transition matrix; the fifth plot (the `model information rate')
shows the information in each observation about the transition matrix.
In \cite{AbdallahPlumbley2010b}, we showed that this `model information rate'
is actually a component of the true IPI when the transition
matrix is being learned online, and was neglected when we computed the IPI from
the transition matrix as if it were a constant.

The peaks of the surprisingness and of both components of the IPI
show good correspondence with the structure of the piece, both as marked in the score
and as analysed by musicologist Keith Potter, who was asked to mark the six
`most surprising moments' of the piece (shown as asterisks in the fifth plot). %%
% \footnote{%
% Note that the boundary marked in the score at around note 5,400 is known to be
% anomalous; on the basis of a listening analysis, some musicologists have
% placed the boundary a few bars later, in agreement with our analysis
% \cite{PotterEtAl2007}.}
%
In contrast, the analyses shown in the lower two plots of \figrf{twopages},
obtained using two rule-based music segmentation algorithms, while clearly
\emph{reflecting} the structure of the piece, do not \emph{segment} it:
the boundary strength functions show no tendency to peak at
the boundaries in the piece.

The complete analysis of \emph{Gradus} can be found in \cite{AbdallahPlumbley2009},
but \figrf{metre} illustrates the result of a metrical analysis: the piece was divided
into bars of 32, 64 and 128 notes. In each case, the average surprisingness and
IPI for the first, second, third \etc notes in each bar were computed. The plots
show that the first note of each bar is, on average, significantly more surprising
and informative than the others, up to the 64-note level, whereas at the 128-note
level, the dominant periodicity appears to remain at 64 notes.
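The averaging behind \figrf{metre} amounts to grouping a per-note quantity by
its position within a hypothetical bar; a minimal sketch (our own helper,
assuming \texttt{ell} is an array of per-note surprisingness or IPI values):
\begin{lstlisting}[language=Python]
import numpy as np

def metrical_profile(ell, bar_length):
    # Mean of a per-note quantity at each position within a bar of the
    # given length, discarding any incomplete final bar.
    n = (len(ell) // bar_length) * bar_length
    return np.asarray(ell[:n]).reshape(-1, bar_length).mean(axis=0)
\end{lstlisting}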

\begin{fig}{metre}
% \scalebox{1}{%
\begin{tabular}{cc}
\colfig[0.45]{matbase/fig36859} & \colfig[0.48]{matbase/fig88658} \\
\colfig[0.45]{matbase/fig48061} & \colfig[0.48]{matbase/fig46367} \\
\colfig[0.45]{matbase/fig99042} & \colfig[0.47]{matbase/fig87490}
% \colfig[0.46]{matbase/fig56807} & \colfig[0.48]{matbase/fig27144} \\
% \colfig[0.46]{matbase/fig87574} & \colfig[0.48]{matbase/fig13651} \\
% \colfig[0.44]{matbase/fig19913} & \colfig[0.46]{matbase/fig66144} \\
% \colfig[0.48]{matbase/fig73098} & \colfig[0.48]{matbase/fig57141} \\
% \colfig[0.48]{matbase/fig25703} & \colfig[0.48]{matbase/fig72080} \\
% \colfig[0.48]{matbase/fig9142} & \colfig[0.48]{matbase/fig27751}

\end{tabular}%
% }
\caption{Metrical analysis by computing average surprisingness and
IPI of notes at different periodicities (\ie hypothetical
bar lengths) and phases (\ie positions within a bar).
}
\end{fig}

\subsection{Real-valued signals and audio analysis}
Using analogous definitions based on the differential entropy
\cite{CoverThomas}, the methods outlined
in \secrf{surprise-info-seq} and \secrf{process-info}
can be reformulated for random variables taking values in a continuous domain.
Information-dynamic methods may thus be applied to expressive parameters of music
such as dynamics, timing and timbre, which are readily quantified on a continuous scale.

% \subsection{Audio based content analysis}
% Using analogous definitions of differential entropy, the methods outlined
% in the previous section are equally applicable to continuous random variables.
% In the case of music, where expressive properties such as dynamics, tempo,
% timing and timbre are readily quantified on a continuous scale, the information
% dynamic framework may also be considered.

Dubnov \cite{Dubnov2006} considers the class of stationary Gaussian
processes, for which the entropy rate may be obtained analytically
from the power spectral density of the signal. Dubnov found that the
multi-information rate (which he refers to as `information rate') can be
expressed as a function of the \emph{spectral flatness measure}. Thus, for a given variance,
Gaussian processes with maximal multi-information rate are those with maximally
non-flat spectra. These essentially consist of a single
sinusoidal component and hence are completely predictable once
the parameters of the sinusoid have been inferred.
% Local stationarity is assumed, which may be achieved by windowing or
% change point detection \cite{Dubnov2008}.
%TODO
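Dubnov's result can be stated compactly as $\rho_\mu = -\frac{1}{2}\log \mathrm{SFM}$,
where the spectral flatness measure (SFM) is the ratio of the geometric to the
arithmetic mean of the power spectral density. A minimal sketch (assuming
\texttt{S} holds PSD samples at uniformly spaced frequencies):
\begin{lstlisting}[language=Python]
import numpy as np

def multi_information_rate(S):
    # rho_mu in nats from PSD samples S: -0.5 * log of the spectral
    # flatness measure (geometric mean / arithmetic mean of the PSD).
    sfm = np.exp(np.mean(np.log(S))) / np.mean(S)
    return -0.5 * np.log(sfm)
\end{lstlisting}
A flat (white) spectrum gives $\mathrm{SFM}=1$ and hence $\rho_\mu=0$; the
more peaked the spectrum, the larger $\rho_\mu$ becomes.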

We are currently working towards methods for the computation of the predictive
information rate in some restricted classes of Gaussian processes, including
finite-order autoregressive models and processes with power-law (or $1/f$) spectra,
which have previously been investigated in relation to their aesthetic properties
\cite{Voss75,TaylorSpeharVan-Donkelaar2011}.

% (fractionally integrated Gaussian noise).
% %(fBm (continuous), fiGn discrete time) possible reference:
% @book{palma2007long,
%   title={Long-memory time series: theory and methods},
%   author={Palma, W.},
%   volume={662},
%   year={2007},
%   publisher={Wiley-Blackwell}
% }



% mention non-gaussian processes extension Similarly, the predictive information
% rate may be computed using a Gaussian linear formulation CITE. In this view,
% the PIR is a function of the correlation between random innovations supplied
% to the stochastic process. %Dubnov, MacAdams, Reynolds (2006) %Bailes and Dean (2009)

% In \cite{Dubnov2006}, Dubnov considers the class of stationary Gaussian
% processes. For such processes, the entropy rate may be obtained analytically
% from the power spectral density of the signal, allowing the multi-information
% rate to be subsequently obtained. One aspect demanding further investigation
% involves the comparison of alternative measures of predictability. In the case of the PIR, a Gaussian linear formulation is applicable, indicating that the PIR is a function of the correlation between random innovations supplied to the stochastic process CITE.
% !!! FIXME

\subsection{Beat Tracking}

A probabilistic method for drum tracking was presented by Robertson
\cite{Robertson11c}. The system infers a beat grid (a sequence
of approximately regular beat times) given audio inputs from a
live drummer, for the purpose of synchronising a music
sequencer with the drummer.
The times of kick and snare drum events are obtained
using dedicated microphones for each drum and a percussive onset detector
\cite{puckette98}. These event times are then sent
to the beat tracker, which maintains a probabilistic belief state in
the form of distributions over the tempo and phase of the beat grid.
Every time an event is received, these distributions are updated
with respect to a probabilistic model which accounts both for tempo and phase
variations and for the emission of drum events at musically plausible times
relative to the beat grid.
%continually updates distributions for tempo and phase on receiving a new
%event time

The use of a probabilistic belief state means we can compute entropies
representing the system's uncertainty about the beat grid, and quantify
the amount of information in each event about the beat grid as the KL divergence
between prior and posterior distributions. Though this is not strictly the
instantaneous predictive information (IPI) as described in \secrf{surprise-info-seq}
(the information gained is not directly about future event times), we can treat
it as a proxy for the IPI, in the manner of the `model information rate'
described in \secrf{minimusic}, which has a similar status.
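Schematically, each drum event triggers a Bayesian update from which both
quantities follow; the sketch below uses a simplified gridded belief over
tempo and phase, and is not the actual implementation of \cite{Robertson11c}:
\begin{lstlisting}[language=Python]
import numpy as np

def update_belief(prior, likelihood):
    # One Bayesian update of a belief over a discretised (tempo, phase)
    # grid. likelihood[k] = p(event time | grid point k). Returns the
    # posterior, the remaining uncertainty (entropy, bits), and the
    # information in the event (KL divergence of posterior from prior).
    post = prior * likelihood
    post = post / post.sum()
    m = post > 0
    entropy = float(-np.sum(post[m] * np.log2(post[m])))
    info = float(np.sum(post[m] * np.log2(post[m] / prior[m])))
    return post, entropy, info
\end{lstlisting}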

\begin{fig*}{drumfig}
% \includegraphics[width=0.9\linewidth]{drum_plots/file9-track.eps}% \\
\includegraphics[width=0.97\linewidth]{figs/file11-track.eps} \\
% \includegraphics[width=0.9\linewidth]{newplots/file8-track.eps}
\caption{Information dynamic analysis derived from audio recordings of
drumming, obtained by applying a Bayesian beat tracking system to the
sequence of detected kick and snare drum events. The grey line shows the system's
varying level of uncertainty (entropy) about the tempo and phase of the
beat grid, while the stem plot shows the amount of information in each
drum event about the beat grid. The entropy drops instantaneously at each
event and rises gradually between events.
}
\end{fig*}

We carried out the analysis on 16 recordings; an example
is shown in \figrf{drumfig}. There we can see variations in the
entropy in the upper graph and the information in each drum event in the lower
stem plot. At certain points in time, unusually large amounts of information
arrive; these may be related to fills and other rhythmic irregularities, which
are often followed by an emphatic return to a steady beat at the beginning
of the next bar---this is something we are currently investigating.
We also analysed the pattern of information flow
on a cyclic metre, much as in \figrf{metre}. All the recordings we
analysed are audibly in 4/4 metre, but we found no
evidence of a general tendency for greater amounts of information to arrive
at metrically strong beats, which suggests that the rhythmic accuracy of the
drummers does not vary systematically across each bar. It is possible that metrical information
existing in the pattern of kick and snare events might emerge in an information
dynamic analysis using a model that attempts to predict the time and type of
the next drum event, rather than just inferring the beat grid as the current model does.
%The analysis of information rates can b
%considered \emph{subjective}, in that it measures how the drum tracker's
%probability distributions change, and these are contingent upon the
%model used as well as external properties in the signal.
%We expect,
%however, that following periods of increased uncertainty, such as fills
%or expressive timing, the information contained in an individual event
%increases. We also examine whether the information is dependent upon
%metrical position.


\section{Information dynamics as compositional aid}
\label{s:composition}

The use of stochastic processes in music composition has been widespread for
decades---for instance, Iannis Xenakis applied probabilistic mathematical models
to the creation of musical materials \cite{Xenakis:1992ul}. While such processes
can drive the \emph{generative} phase of the creative process, information dynamics
can serve as a novel framework for a \emph{selective} phase, by
providing a set of criteria to be used in judging which of the
generated materials
are of value. This alternation of generative and selective phases has been
noted before \cite{Boden1990}.
%
Information-dynamic criteria can also be used as \emph{constraints} on the
generative processes, for example, by specifying a certain temporal profile
of surprisingness and uncertainty the composer wishes to induce in the listener
as the piece unfolds.
%stochastic and algorithmic processes: ; outputs can be filtered to match a set of
%criteria defined in terms of information-dynamical characteristics, such as
%predictability vs unpredictability
%s model, this criteria thus becoming a means of interfacing with the generative processes.

%The tools of information dynamics provide a way to constrain and select musical
%materials at the level of patterns of expectation, implication, uncertainty, and predictability.
In particular, the behaviour of the predictive information rate (PIR) defined in
\secrf{process-info} makes it interesting from a compositional point of view. The definition
of the PIR is such that it is low both for extremely regular processes, such as constant
or periodic sequences, \emph{and} for extremely random processes, where each symbol
is chosen independently of the others, in a kind of `white noise'. In the former case,
the pattern, once established, is completely predictable and therefore there is no
\emph{new} information in subsequent observations. In the latter case, the randomness
and independence of all elements of the sequence means that, though potentially surprising,
each observation carries no information about the ones to come.

Processes with high PIR maintain a certain kind of balance between
predictability and unpredictability, in such a way that the observer must continually
pay attention to each new observation as it occurs in order to make the best
possible predictions about the evolution of the sequence. This balance between predictability
and unpredictability is reminiscent of the inverted `U' shape of the Wundt curve (see \figrf{wundt}),
which summarises the observations of Wundt \cite{Wundt1897} that stimuli are most
pleasing at intermediate levels of novelty or disorder, where there is a balance between
`order' and `chaos'.

Using the methods of \secrf{markov}, we found \cite{AbdallahPlumbley2009}
a similar shape when plotting entropy rate against PIR---this is visible in the
upper envelope of the scatter plot in \figrf{mtriscat}, which is a 3-D scatter plot of
three of the information measures discussed in \secrf{process-info} for several thousand
first-order Markov chain transition matrices generated by a random sampling method.
The coordinates of the `information space' are entropy rate ($h_\mu$), redundancy ($\rho_\mu$), and
predictive information rate ($b_\mu$). The points along the `redundancy' axis correspond
to periodic Markov chains. Those along the `entropy' axis produce uncorrelated sequences
with no temporal structure. Processes with high PIR are to be found at intermediate
levels of entropy and redundancy.
These observations led us to construct the `Melody Triangle', a graphical interface
for exploring the melodic patterns generated by each of the Markov chains represented
as points in \figrf{mtriscat}.
samer@53 942
samer@70 943
samer@43 950 \begin{fig}{wundt}
samer@43 951 \raisebox{-4em}{\colfig[0.43]{wundt}}
samer@43 952 % {\ \shortstack{{\Large$\longrightarrow$}\\ {\scriptsize\emph{exposure}}}\ }
samer@43 953 {\ {\large$\longrightarrow$}\ }
samer@43 954 \raisebox{-4em}{\colfig[0.43]{wundt2}}
samer@43 955 \caption{
The Wundt curve relating randomness/complexity to
perceived value. Repeated exposure sometimes results
in a move to the left along the curve \cite{Berlyne71}.
samer@43 959 }
samer@43 960 \end{fig}
hekeus@45 961
hekeus@13 962
hekeus@13 963
samer@23 964 \subsection{The Melody Triangle}
samer@23 965
The Melody Triangle is an interface for the discovery of melodic
materials, in which the input---a position within a triangle---maps directly
to information-theoretic properties of the output.
samer@23 976
The triangle is populated with first-order Markov chain transition
matrices as illustrated in \figrf{mtriscat}.
The distribution of transition matrices in this space forms a relatively thin
curved sheet, so it is a reasonable simplification to project out the
third dimension (the PIR) and present an interface that is just two-dimensional.
The right-angled triangle is rotated, reflected and stretched to form an equilateral triangle with
the $h_\mu=0, \rho_\mu=0$ vertex at the top, the `redundancy' axis down the left-hand
side, and the `entropy rate' axis down the right, as shown in \figrf{TheTriangle}.
This is our `Melody Triangle', which forms the interface by which the system is controlled.
The user selects a point within the triangle; this is mapped into the
information space, and the nearest transition matrix is used to generate
a sequence of values, which are then sonified either as pitched notes or percussive
sounds. By choosing the position within the triangle, the user can control the
output at the level of its `collative' properties, with access to the variety
of patterns described above and in \secrf{markov}.
Though the interface is 2D, the third dimension (PIR) is implicitly present:
transition matrices retrieved from near the centre line of the triangle
tend to have higher PIR. We hypothesise that, under
the appropriate conditions, these will be perceived as more `interesting' or
`melodic'.
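
A minimal sketch of this lookup-and-sampling step, assuming a library of matrices
whose $(h_\mu, \rho_\mu)$ coordinates have been precomputed as in the earlier
listing (the names here are illustrative, not those of the actual implementation),
might read:
\begin{lstlisting}[language=Python]
rng = np.random.default_rng()

def nearest_matrix(point, library):
    # library: a list of ((h, rho), P) pairs; point: the
    # user's triangle position mapped to (h, rho) in bits
    return min(library,
               key=lambda m: np.hypot(m[0][0] - point[0],
                                      m[0][1] - point[1]))[1]

def sample_sequence(P, n):
    # draw n symbols from the chain, starting from its
    # stationary distribution (stationary() as above)
    k = P.shape[0]
    s = [rng.choice(k, p=stationary(P))]
    for _ in range(n - 1):
        s.append(rng.choice(k, p=P[s[-1]]))
    return s
\end{lstlisting}
The resulting symbols are the raw material for the pitched or percussive
sonification described above.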
samer@70 1002
samer@41 1017
The Melody Triangle exists in two incarnations: a screen-based interface,
where a user moves tokens in and around a triangle on screen, and a multi-user
interactive installation, in which a Kinect camera tracks individuals in a space and
maps their positions in physical space to the triangle. In the latter, each visitor
who enters the installation generates a melody and can collaborate with their
co-visitors to generate musical textures. This makes the interaction physically engaging
and (as our experience with visitors both young and old has demonstrated) more playful.
samer@62 1028 %Additionally visitors can change the
samer@62 1029 %tempo, register, instrumentation and periodicity of their melody with body gestures.
samer@41 1030
samer@70 1031
samer@70 1032 \begin{fig}{mtriscat}
samer@70 1033 \colfig[0.9]{mtriscat}
samer@70 1034 \caption{The population of transition matrices in the 3D space of
samer@70 1035 entropy rate ($h_\mu$), redundancy ($\rho_\mu$) and PIR ($b_\mu$),
samer@70 1036 all in bits.
The concentrations of points along the redundancy axis correspond
to Markov chains which are roughly periodic with periods of 2 (redundancy 1 bit),
3, 4, \etc, up to period 7 (redundancy $\log_2 7 \approx 2.8$ bits); for a
deterministic cycle of period $p$, $h_\mu = 0$ and $\rho_\mu = \log_2 p$.
The colour of each point
represents its PIR---note that the highest values are found at intermediate entropy
and redundancy, and that the distribution as a whole forms a curved triangle. Although
not visible in this plot, the distribution is largely hollow in the middle.}
samer@70 1043 \end{fig}
samer@70 1044
samer@70 1045
The screen-based interface can serve as a compositional tool.
A number of tokens, each representing a
sonification stream or `voice', can be dragged in and around the triangle.
For each token, a sequence of symbols is sampled using the corresponding
transition matrix; these symbols
are then mapped to notes of a scale or percussive sounds%
\footnote{The sampled sequence could easily be mapped to other musical processes, possibly over
different time scales, such as chords, dynamics and timbres. It would also be possible
to map the symbols to visual or other outputs.}%
. Keyboard commands give control over other musical parameters such
as pitch register and inter-onset interval.
The system is capable of generating quite intricate musical textures when multiple tokens
are in the triangle, but unlike other computer-aided composition tools or programming
environments, the composer exercises control at the abstract level of information-dynamic
properties.
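
The symbol-to-note mapping itself can be kept very simple; the toy sketch below
(our illustration, not the system's actual code) maps symbol indices to degrees
of a major scale, with register and inter-onset interval as parameters standing
in for the keyboard controls:
\begin{lstlisting}[language=Python]
MAJOR = [0, 2, 4, 5, 7, 9, 11]  # semitone offsets

def sonify(symbols, register=60, ioi=0.25):
    # one (MIDI pitch, onset time in seconds) per symbol;
    # indices beyond one octave wrap upwards
    return [(register + MAJOR[s % 7] + 12 * (s // 7),
             i * ioi)
            for i, s in enumerate(symbols)]
\end{lstlisting}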
samer@23 1068
samer@70 1069 \begin{fig}{TheTriangle}
samer@70 1070 \colfig[0.7]{TheTriangle.pdf}
samer@70 1071 \caption{The Melody Triangle}
samer@70 1072 \end{fig}
hekeus@38 1073
samer@66 1074 \comment{
samer@66 1075 \subsection{Information Dynamics as Evaluative Feedback Mechanism}
samer@66 1076 %NOT SURE THIS SHOULD BE HERE AT ALL..?
samer@42 1077 Information measures on a stream of symbols can form a feedback mechanism; a
hekeus@45 1078 rudimentary `critic' of sorts. For instance symbol by symbol measure of predictive
samer@42 1079 information rate, entropy rate and redundancy could tell us if a stream of symbols
samer@42 1080 is currently `boring', either because it is too repetitive, or because it is too
hekeus@45 1081 chaotic. Such feedback would be oblivious to long term and large scale
hekeus@45 1082 structures and any cultural norms (such as style conventions), but
hekeus@45 1083 nonetheless could provide a composer with valuable insight on
samer@42 1084 the short term properties of a work. This could not only be used for the
samer@42 1085 evaluation of pre-composed streams of symbols, but could also provide real-time
samer@42 1086 feedback in an improvisatory setup.
samer@66 1087 }
hekeus@38 1088
samer@66 1089 \subsection{User trials with the Melody Triangle}
We are currently using the screen-based Melody Triangle interface
to investigate the relationship between the information-dynamic
characteristics of sonified Markov chains and subjective musical preference.
We carried out a pilot study with six participants, who were asked
to use a simplified form of the user interface (a single controllable token,
and no rhythmic, registral or timbral controls) under two conditions:
one where a single sequence was sonified under user control, and another
where an additional sequence was sonified in a different register, as if generated
by a fixed invisible token in one of four regions of the triangle. In addition, subjects
were asked to press a key if they `liked' what they were hearing.
hekeus@16 1100
We recorded subjects' behaviour as well as the points they marked
with a key press.
Some results for two of the subjects are shown in \figrf{mtri-results}. Though
we have not been able to detect any systematic across-subjects preference for any particular
region of the triangle, subjects do seem to exhibit distinct kinds of exploratory behaviour.
Our initial hypothesis, that subjects would linger longer in regions of the triangle
that produced aesthetically preferable sequences, and that this would tend to be towards the
centre line of the triangle for all subjects, was not confirmed. However, it is possible
that the design of the experiment encouraged an initial exploration of the space (sometimes
very systematic, as for subject c) aimed at \emph{understanding}
how the system works, rather than finding musical patterns. It is also possible that the
system encourages users to create musically interesting output by \emph{moving the token},
rather than finding a particular spot in the triangle which produces a musically interesting
sequence by itself.
samer@70 1115
samer@70 1116 \begin{fig}{mtri-results}
samer@70 1117 \def\scat#1{\colfig[0.42]{mtri/#1}}
samer@70 1118 \def\subj#1{\scat{scat_dwells_subj_#1} & \scat{scat_marks_subj_#1}}
samer@70 1119 \begin{tabular}{cc}
samer@70 1120 % \subj{a} \\
samer@70 1121 % \subj{b} \\
samer@70 1122 \subj{c} \\
samer@70 1123 \subj{d}
samer@70 1124 \end{tabular}
\caption{Dwell times and mark positions from user trials with the
on-screen Melody Triangle interface, for two subjects. The left-hand column shows
the positions in a 2D information space (entropy rate vs multi-information rate,
in bits) where each subject spent their time; the area of each circle is proportional
to the time spent there. The right-hand column shows the points which subjects
`liked'; the area of each circle is proportional to the time spent at
that point before it was marked.}
samer@70 1132 \end{fig}
samer@46 1133
Comments collected from the subjects suggest that
the information-dynamic characteristics of the patterns were readily apparent
to most: several noticed the main organisation of the triangle,
with repetitive notes at the top, cyclic patterns along one edge, and unpredictable
notes towards the opposite corner. Some described their systematic exploration of the space.
Two felt that the right side was `more controllable' than the left (a consequence
of their ability to return to a particular distinctive pattern and recognise it
as one heard previously). Two reported that they became bored towards the end,
but another felt there wasn't enough time to `hear out' the patterns properly.
One subject did not `enjoy' the patterns in the lower region, but another said the lower
central regions were more `melodic' and `interesting'.
samer@4 1147
We plan to continue the trials with a slightly less restricted user interface, in order
to make the experience more enjoyable and thereby give subjects longer with the interface;
this may allow them to get beyond the initial exploratory phase and give a clearer
picture of their aesthetic preferences. In addition, we plan to conduct a
study under more restrictive conditions, where subjects will have no control over the patterns
other than to signal (a) which of two alternatives they prefer in a forced-choice
paradigm, and (b) when they are bored of listening to a given sequence.
samer@66 1155
hekeus@38 1156 %\emph{comparable system} Gordon Pask's Musicolor (1953) applied a similar notion
hekeus@38 1157 %of boredom in its design. The Musicolour would react to audio input through a
hekeus@38 1158 %microphone by flashing coloured lights. Rather than a direct mapping of sound
hekeus@38 1159 %to light, Pask designed the device to be a partner to a performing musician. It
hekeus@38 1160 %would adapt its lighting pattern based on the rhythms and frequencies it would
hekeus@38 1161 %hear, quickly `learning' to flash in time with the music. However Pask endowed
hekeus@38 1162 %the device with the ability to `be bored'; if the rhythmic and frequency content
hekeus@38 1163 %of the input remained the same for too long it would listen for other rhythms
hekeus@38 1164 %and frequencies, only lighting when it heard these. As the Musicolour would
hekeus@38 1165 %`get bored', the musician would have to change and vary their playing, eliciting
hekeus@38 1166 %new and unexpected outputs in trying to keep the Musicolour interested.
samer@4 1167
hekeus@13 1168
samer@70 1169 \section{Conclusions}
samer@61 1170
We have surveyed several emerging areas of application of the methods and
ideas of information dynamics to problems in music analysis, perception
and cognition, including musicological analysis of symbolic music, audio analysis,
rhythm processing, and compositional and creative tasks. The approach has proved
successful in musicological analysis, and though our initial data on
rhythm processing and aesthetic preference are inconclusive, there is still
plenty of work to be done in this area: wherever there are probabilistic models,
information dynamics can shed light on their behaviour.
hekeus@50 1181
hekeus@50 1182
hekeus@45 1183
\section*{Acknowledgments}
This work is supported by EPSRC Doctoral Training Centre EP/G03723X/1 (HE),
grants GR/S82213/01 and EP/E045235/1 (SA), an EPSRC DTA Studentship (PF),
an RAEng/EPSRC Research Fellowship 10216/88 (AR), an EPSRC Leadership
Fellowship EP/G007144/1 (MDP) and EPSRC IDyOM2 EP/H013059/1.
It is also partly funded by the CoSound project, which is funded by the
Danish Agency for Science, Technology and Innovation.
Thanks also to Marcus Pearce for providing the two rule-based analyses of \emph{Two Pages}.
hekeus@55 1190
hekeus@44 1191
samer@59 1192 \bibliographystyle{IEEEtran}
samer@43 1193 {\bibliography{all,c4dm,nime,andrew}}
samer@4 1194 \end{document}