changeset 27:ae650489d3a8

Updated report
author Paulo Chiliguano <p.e.chiilguano@se14.qmul.ac.uk>
date Sun, 30 Aug 2015 15:49:27 +0100
parents e4bcfe00abf4
children a95e656907c3
files Code/content_based.py Code/eda.py Code/eda_discrete.py Report/acknowledgements/acknowledgements.tex Report/chapter1/introduction.tex Report/chapter2/General_model_hybrid_recommender.png Report/chapter2/background.tex Report/chapter2/content-based-recommender1.png Report/chapter2/dnn.png Report/chapter2/item-item1.jpg Report/chapter2/mylenet.png Report/chapter2/three_way_aspect_model.png Report/chapter2/user-user1.jpg Report/chapter2/user_item_data.png Report/chapter3/ch3.tex Report/chapter3/taste_profile.png Report/chapter4/evaluation.tex Report/chapter5/results.tex Report/chapter6/conclusions.tex Report/chiliguano_msc_finalproject.blg Report/chiliguano_msc_finalproject.dvi Report/chiliguano_msc_finalproject.lof Report/chiliguano_msc_finalproject.pdf Report/chiliguano_msc_finalproject.synctex.gz Report/chiliguano_msc_finalproject.tex Report/chiliguano_msc_finalproject.toc Report/references.bib slides/chiliguano_msc_project_slides.tex
diffstat 28 files changed, 1254 insertions(+), 214 deletions(-) [+]
line wrap: on
line diff
--- a/Code/content_based.py	Wed Aug 26 02:00:48 2015 +0100
+++ b/Code/content_based.py	Sun Aug 30 15:49:27 2015 +0100
@@ -112,7 +112,7 @@
                     fn += 1
                 elif rating <= rating_threshold:
                     tn += 1
-    print tp, fp, fn, tn
+    #print tp, fp, fn, tn
     if tp != 0:
         precision = tp / (tp + fp)
         recall = tp / (tp + fn)
@@ -138,7 +138,7 @@
     
     topN = {}
     for user, song_rating in users_train[i].iteritems():
-        topN[user] = top_n(sim_matrix, user, song_rating)
+        topN[user] = top_n(sim_matrix, user, song_rating, rating_threshold=2, N=20)
     elapsed_time = time.time() - start_time
     print 'Training execution time: %.3f seconds' % elapsed_time
         
--- a/Code/eda.py	Wed Aug 26 02:00:48 2015 +0100
+++ b/Code/eda.py	Sun Aug 30 15:49:27 2015 +0100
@@ -219,7 +219,7 @@
 #                fn += 1
 #            elif score <= rating_threshold and sim_value < EDA_treshold:
 #                tn += 1
-    print tp, fp, fn, tn
+    #print tp, fp, fn, tn
     if tp != 0:
         precision = tp / (tp + fp)
         recall = tp / (tp + fn)
@@ -263,7 +263,7 @@
     elapsed_time = time.time() - start_time
     print 'Training execution time: %.3f seconds' % elapsed_time
     
-    pi, ri, fi, ai = evaluate_eda(profile_u, users_test[i])
+    pi, ri, fi, ai = evaluate_eda(profile_u, users_test[i], N=20)
     p = np.append(p, pi)
     r = np.append(r, ri)
     f = np.append(f, fi)
--- a/Code/eda_discrete.py	Wed Aug 26 02:00:48 2015 +0100
+++ b/Code/eda_discrete.py	Sun Aug 30 15:49:27 2015 +0100
@@ -244,7 +244,7 @@
 #                fn += 1
 #            elif score <= rating_threshold and sim_value < EDA_treshold:
 #                tn += 1
-    print tp, fp, fn, tn
+    #print tp, fp, fn, tn
     if tp != 0:
         precision = tp / (tp + fp)
         recall = tp / (tp + fn)
@@ -269,7 +269,7 @@
     elapsed_time = time.time() - start_time
     print 'Training execution time: %.3f seconds' % elapsed_time
     
-    pi, ri, fi, ai = evaluate_eda(profile_u, users_test[i])
+    pi, ri, fi, ai = evaluate_eda(profile_u, users_test[i], N=20)
     p = np.append(p, pi)
     r = np.append(r, ri)
     f = np.append(f, fi)
--- a/Report/acknowledgements/acknowledgements.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/acknowledgements/acknowledgements.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -3,13 +3,10 @@
 %	Thanks Mum!
 %\end{abstract}
 \section*{Acknowledgements}
-I wish to express my sincere gratitude to Dr. Georgy Fazekas, Lecturer in Digital Media at Queen Mary University of London, for providing me guidance and valuable suggestions during the planning and development of this project. I also wish to acknowledge the help provided by Dr. Mathieu Barthet and Mr. Tim Kay with the access to research servers.
+I wish to express my sincere gratitude to Dr. Georgy Fazekas, Lecturer in Digital Media at Queen Mary University of London, for giving me the opportunity to work on this state-of-the-art field and for his guidance and valuable suggestions during the planning and development of this project. I also wish to acknowledge the supplementary assistance provided by Dr. Tony Stockman and Dr. Mathieu Barthet during my time as a student at Queen Mary University of London.
 \\
 \\
-I would also like to extend my thanks to Dr. Tony Stockman, my academic advisor, for the assistance provided during my time as a student at Queen Mary University of London.
+I am particularly grateful with National Government of the Republic of Ecuador for awarding me with a scholarship to study a postgraduate taught degree at a high-quality research university in the United Kingdom of Great Britain and Northern Ireland.
 \\
 \\
-I am particularly grateful with National Government of the Republic of Ecuador for awarding me with a scholarship to study a postgraduate degree at a high-quality research university in the United Kingdom of Great Britain and Northern Ireland.
-\\
-\\
-Finally, I wish to thank my parents and brothers for their support and encouragement throughout my study.
\ No newline at end of file
+Finally, a special warm thanks goes to my parents, my brothers and Miss Ana Costilla for their support and encouragement throughout my studies.
\ No newline at end of file
--- a/Report/chapter1/introduction.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter1/introduction.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -6,31 +6,25 @@
 Recommender systems can be described as engines that guide the users to suitable objects from a large number of options in a particular domain such as books, films or music. The available information of users and items' attributes is analysed and exploited by the recommender systems to produce a list of previously unseen items that each user might find enjoyable. Depending on the analysed data, the design of a recommender can be focused on historical ratings given by users or similarities between the attributes of items that an user already rated.
 
 \section{Motivation}
-Due to the available information of relationship between users and items would be sparse, e.g., most part of the users tend to do not give enough ratings, the accuracy of predictions would decrease. Another disadvantage of traditional recommender systems, referred as \textit{cold-start problem}, arises when a new item cannot be recommended until it gets enough ratings, or, equivalently, when a new user does not have any ratings \citep{melville2010recommender}. In order to alleviate the rating sparsity and cold-start problems, there is the motivation to combine two or more recommendation designs into hybrid approaches. 
+Because the available information on the relationships between users and items tends to be sparse, e.g., most users do not give enough ratings, the accuracy of predictions decreases. Another disadvantage of traditional recommender systems, referred as \textit{cold-start problem}, arises when a new item cannot be recommended until it gets enough ratings, or, equivalently, when a new user does not have any ratings \parencite{melville2010recommender}. In order to alleviate the rating sparsity and cold-start problems, there is the motivation to combine two or more recommendation techniques into hybrid approaches. 
 
-Deep learning is an approach to artificial intelligence for describing raw data as a nested hierarchy of concepts, with each abstract concept defined in terms of simpler representations. For example, deep learning can describe high-level features of an image of a car such as position, color or brightness of the object, in terms of contours, which are also represented in terms of edges. \citep{Bengio-et-al-2015-Book}  
+Deep learning is an approach to artificial intelligence for describing raw data as a nested hierarchy of concepts, with each abstract concept defined in terms of simpler representations. For example, deep learning can describe high-level features of an image of a car such as position, colour or brightness of the object, in terms of contours, which are also represented in terms of edges. \parencite{Bengio-et-al-2015-Book}  
 
-Inspired in natural evolution of species, Estimation of Distribution Algorithms (EDAs) \citep{larranaga2002estimation} are robust techniques developed during the last decade for optimisation in Statistics and Machine Learning fields. EDAs can capture the explicit structure of a population with a probability distribution estimated from the best individuals of that population.
+Inspired in natural evolution of species, estimation of distribution algorithms (EDAs) \parencite{larranaga2002estimation} are robust techniques developed during the last decade for optimisation in Statistics and Machine Learning fields. EDAs can capture the explicit structure of a population with a probability distribution estimated from the best individuals of that population.
 
-\section{Aim}
-We aim to design and implement a hybrid music recommender to suggest new music tracks that an user would find them appealing and enjoyable. The architecture of our hybrid recommender combines two recommendation techniques. 
+\section{Aims}
+We aim to design and implement a hybrid music recommender to mitigate the cold-start problem in a content-based recommendation strategy. The architecture of our hybrid recommender approach combines two fundamental tasks \parencite{recsys2012}: \textit{user modelling} and \textit{information filtering}. Both of these techniques require user-item data to learn users' interests and select items based on their content description, respectively.
 
-%The first technique is collaborative filtering to predict music preferences on the basis of users' information from an online social network (OSN) such as Last.fm,
+In this project, user-item information is obtained from the Taste Profile dataset, which is a complementary subset of the Million Song Dataset \parencite{Bertin-Mahieux2011} and provides real world listeners activity, i.e., play counts of a song. On the other hand, the items to consolidate the music library are obtained by using the unique identifier of each song to fetch its audio data from 7digital.
 
-the second technique is \textit{content-based filtering} where recommendations are produced by computing similarities between representations of content of items that an user
+A convolutional deep neural network (CDNN), which is a deep learning model, is employed to describe the time-frequency content of each audio clip with an n-dimensional vector, whose dimensions represent the probability of a clip belonging to a specific music genre. In this project, we bound the number of music genres to 10.
 
+As a primary contribution of this project, estimation of distribution algorithms (EDA) are investigated to model user profiles in terms of probabilities of music genre preferences. The algorithms use play count and the content vector of each song in the user's collection to optimise the profile. In addition, the content vector values are treated as discrete and continuous variables, for evaluation purposes.
 
-in which  are correlated to compute similarities between them.
+Each user profile then is compared with the vector representation of an audio clip to compute the similarity value between them. Recommendations for a user are built up by selecting the clips with highest similarity values.
 
-Users' information is obtained from the Taste Profile dataset, which is a complementary subset of the Million Song Dataset\footnote{http://labrosa.ee.columbia.edu/millionsong/}. The music library that contains sample audio clips of the rated songs in the Taste Profile dataset is consolidated by fetching audio files using 7digital API.
+The evaluation of our hybrid music recommender approach is assessed by comparing the results obtained with a traditional content-based recommender.
 
-A convolutional neural network (CNN), which is a deep learning model, is employed to describe each audio file of the music library with a n-dimensional vector, whose dimensions represent music genres.
-
-An Estimation of Distribution Algorithm (EDA) technique is implemented to model user profiles in terms of music genres in order to compare each profile with the vector representation of the audio clips to compute similarities between them. Recommendation is achieved by choosing the clips with highest similarity values.
-
-The evaluation of our hybrid music recommender will be assessed by comparing the prediction accuracy with a traditional content-based recommender p.
-
-%that as automatically as possible analyses the multi-track input audio signals, finds and measures the masking phenomenon and uses Equaliser to reduce or solve the problem.
-\section{Outline of the thesis}
+\section{Thesis outline}
 
 The rest of the report is organised as follows: Chapter 2 provides an overview in recommender systems. Recommendation process, associated challenges, and related work based on state-of-the-art techniques are discussed. In Chapter 3, we present our proposed hybrid recommendation approach and describe the stages and algorithms in detail. The experiments and evaluation protocols are to assess the performance of the hybrid recommender presented in Chapter 4. We proceed to discuss and analyse the results from the conducted experiments to evaluate the proposed hybrid music recommender. In Chapter 6, we present the conclusions and some thoughts for further research.
\ No newline at end of file
Binary file Report/chapter2/General_model_hybrid_recommender.png has changed
--- a/Report/chapter2/background.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter2/background.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -1,71 +1,233 @@
 \chapter{Background}
-Recommender systems set up opportunities and challenges for industry to understand consumption behaviour of users. In particular, for music industry, the develop of recommender systems could improve sales for artists and labels, and the discovery of new songs for listeners. However, regarding that music tastes vary from one person to another person, an advantageous music recommender system should be able to infer listeners needs through their historical listening preference information, similarities with another listeners, and audio signal features from their music collections.
+Recommender systems create opportunities and challenges for industry to understand consumption behaviour of users. In particular, for music industry, the development of recommender systems could improve digital music sales \parencite{ringen_2015}, and also, it could assist the listeners to discover new music through their habits \parencite{1_hypebot.com_2015}. However, when there is no prior information about a newly introduced item in a recommender system, known as the \textit{cold-start problem}, popular songs could be favoured in recommendation process instead of items in the \textit{long tail}, i.e., songs that do not have enough ratings. Usually, content-based recommender systems are used to solve the cold-start problem because similarities between items are based on the content without regarding the ratings \parencite{Park200811}. Another solution to address the cold-start problem is to combine recommendation techniques to boost the strengths of each technique in a hybrid architecture. \parencite{melville2010recommender}
 
-In the following sections, the importance of online social networks for retrieving user-item information among with previous work on music recommender systems are presented. Subsequently, a novel approach of an hybrid recommender system based on Estimation of Distribution Algorithm (EDA) is introduced and examined.
+In this chapter, we present the importance of online social networks and music services platforms for retrieving user-item information, in conjunction with related work on music recommender systems. Subsequently, a novel approach of a hybrid recommendation model based on estimation of distribution algorithms (EDAs) is introduced and examined.
 
 \section{Online Social Networks}
-\citet{JCC4:JCC4393} describe social network sites (SNSs) as: \begin{quote}``Web-based services that allow individuals to (1) construct a public or semi-public profile within a bounded system, (2) articulate a list of other users with whom they share a connection, and (3) view and traverse their list of connections and those made by others within the system.''\end{quote}
+Social network sites \parencite{JCC4:JCC4393} are defined as: \begin{quote}``Web-based services that allow individuals to (1) construct a public or semi-public profile within a bounded system, (2) articulate a list of other users with whom they share a connection, and (3) view and traverse their list of connections and those made by others within the system.''\end{quote}
 
-During the last decade, online social networks have become the outstanding source of multimedia information.
+During the last decade, online social networks, which are also identified as \textit{social media} platforms, have become the outstanding technologies for retrieving and exchanging multimedia information \parencite{Putzke2014519}. Facebook, Twitter or YouTube, have enabled users to produce and share content on the internet; especially, customers around the world are renovating business models by sharing reviews and comments of products directly to companies. This produced content provides opportunities for research to track consumer's behaviour. \parencite{smith2009social}
 
-social net info edges between user
+In particular, Last.fm\footnote{http://www.last.fm/} is an online radio station that also has the facilities of a social media platform, where a user profile is built up by collecting the music tracks listened on multimedia players through an indexing process called \emph{scrobbling}. This profile may expose music consumption and listening behaviour. \parencite{Putzke2014519}
 
-\subsection{Last.fm}
-Last.fm is a social network system that accumulate a list of played audio tracks from registered users through \emph{scrobbling} to provide to any user a detail about listening preference and taste similarites between connected friends in the network. Last.fm also uses scrobbling to feed its music recommendation service to help to users to discover new artists.
+%\subsection{Last.fm}
+\section{Music services platforms}
+%\subsection{Echonest}
+The Echo Nest\footnote{http://developer.echonest.com/} was a music intelligence company that offered solutions for music discovery and personalisation, dynamic curated sources, audio fingerprinting and interactive music applications. In 2014, The Echo Nest was acquired by Spotify\footnote{https://www.spotify.com/}, which is a commercial music streaming service, where a user can browse and listen to music tracks sorted by artists, albums, genres or playlists. 
 
-Users' information such as recently played tracks, loved tracks, or top songs over a time period e.g. weeks, months, can be retrieved by using Last.fm API\footnote{http://www.last.fm/api} methods.
+However, The Echo Nest API is still active for developer community and offers the access to artists, songs, taste profiles and playlists data. Particularly, The Echo Nest API is able to retrieve information limited to a particular music tracks catalogue such as 7digital\footnote{http://developer.7digital.com/}.
 
-\section{Music services platforms}
+Both The Echo Nest and 7digital require to sign up for a free account to get unique keys for OAuth\footnote{http://oauth.net/} authentication in order to retrieve desired information through their respective APIs. As well, a free account has a limited number of calls: in the case of The Echo Nest it is limited to 20 requests per minute and in the case of 7digital it is limited to 4000 requests per day.
 
-\subsection{Echonest}
-\subsection{7Digital}
-
-Both Echo Nest and 7digital require to sign up to their API to get unique keys for OAuth authentication in order to retrieve desired information. As well, free account has limited number of calls, in the case of Echo Nest is limited to 20 request per minute and in the case of 7digital is limited to 4000 request per day.
+In this project, we use a The Echo Nest account to get music tracks identifiers for each song in the user-item dataset and we use a 7digital developer account to fetch audio for each music track catalogue identifier. The user-item dataset consists of user - song - play count triplets of the Taste Profile\footnote{http://labrosa.ee.columbia.edu/millionsong/tasteprofile} subset which contains real world listeners activity provided among Echo Nest partners including Last.fm.
+%\subsection{7Digital}
 %\subsection{APIs}
 %The publicly available music related information can be collected from user profiles on social networks using Application Program Interface (API).
-
 %\section{Data Fusion Techniques}
 %Combination of multiple sources of information to obtain more relevant parameters is known as data fusion.
 %In this study, a cooperative data fusion technique is considered to augment information provided from social network source to content-based system features. \citep{Castanedo2013}
 
 \section{Recommender Systems}
+Recommender systems are software or technical facilities to provide items suggestions or predict customer preferences by using prior user information. These systems play an important role in commercial applications to increase sales and convey user satisfaction. In general, recommender systems can be categorised in two major groups: collaborative filtering and content-based filtering \parencite{melville2010recommender}.
 
-Recommender systems are software or technical facilities to provide items suggestions or predict customer preferences. These systems play an important role in commercial applications to increase items sales and user satisfaction. In general, recommender systems can be categorised in the following groups: collaborative filtering and content-based methods. 
+\textcite{1242} also considers other methods for music recommendation such as \textit{demographic filtering} and \textit{context-based} methods.
 
 \subsection{Collaborative filtering}
-In collaborative filtering (CF), recommendations are based on correlation between users' ratings or they can be predicted from historical user data. The strength of CF is that the recommendation process is independent from the item features. On the other hand, CF would not be suitable if the user-item matrix is sparse. \citep{Burke2002331}
+In collaborative filtering (CF) \parencite{Yao2015453}, a \emph{m$\times$n} rating matrix (Figure~\ref{fig:useritemmatrix}) represents the relationships between \textit{m} users and \textit{n} items.
+\begin{figure}[ht!]
+	\centering
+	\includegraphics[width=\textwidth]{chapter2/user_item_data.png}
+	\caption{Collaborative filtering process \parencite{sarwar2001item}}
+	\label{fig:useritemmatrix}
+\end{figure}
 
-\subsection{Content-based methods}
-Content-based methods build user profiles by analysing the users' rated items. Each profile is then processed to be correlated with another item, which has not been rated, to compute the interest of the user on this object. \citep{Lops2011}
+Recommendations are based on the computed similarities between rows (for users) or columns (for items), hence, CF can be further subdivided in the following neighbourhood models \parencite{Hu2008263}:
 
-\section{Hybrid recommender methods}
-Hybrid recommendation is based on the combination of techniques mentioned above, by using the advantages of one system to compensate the disadvantages of the other system. 
+\begin{itemize}
+	\item \textbf{User based} collaborative filtering, produce a recommendation of a previously unseen item based on similarity between users (Figure~\ref{fig:userbasedcf}).
 
-In this project, CF, that provides song ratings, is integrated with a content-based method, that compare spectral features of song to achieve hybridisation. 
+	\begin{figure}[h!]
+		\centering
+		\includegraphics[width=0.5\textwidth]{chapter2/user-user1.jpg}
+		\caption{User based collaborative filtering \parencite{1_siddharths_blog_2013}}
+		\label{fig:userbasedcf}
+	\end{figure}
+	
+	\item \textbf{Item based} collaborative filtering, produce a recommendation by comparing the similarities between a previously unseen item and the user's items (Figure ~\ref{fig:itembasedcf}).
+	
+	\begin{figure}[ht!]
+		\centering
+		\includegraphics[width=0.4\textwidth]{chapter2/item-item1.jpg}
+		\caption{Item based collaborative filtering \parencite{1_siddharths_blog_2013}}
+		\label{fig:itembasedcf}
+	\end{figure}
+	
+\end{itemize}
+
+Similarities between a pair of users \emph{a,u} are usually computed with Pearson correlation metric \parencite{sarwar2001item}, given by Equation~\eqref{eq:pearson}:
+\begin{equation}
+	sim(a,u) =\frac{\sum _{i\in I}(r_{a,i} - \bar{r}_a)(r_{u,i} - \bar{r}_u)}{\sqrt{\sum _{i\in I}(r_{a,i} - \bar{r}_a)^2} \sqrt{\sum _{i\in I}(r_{u,i} - \bar{r}_u)^2}}
+	\label{eq:pearson}
+\end{equation}
+where \emph{I} is the set of items rated by both users, $r_{u,i}$ is the rating given to item \emph{i} by user \emph{u}, and $\bar{r}_u$ is the mean rating given by user \emph{u}. Equivalently, for similarities between a pair of items \emph{i,j}, the correlation is given by Equation~\eqref{eq:pearson2}:
+\begin{equation}
+sim(i,j) =\frac{\sum _{u\in U}(r_{u,i} - \bar{r}_i)(r_{u,j} - \bar{r}_j)}{\sqrt{\sum _{u\in U}(r_{u,i} - \bar{r}_i)^2} \sqrt{\sum _{u\in U}(r_{u,j} - \bar{r}_j)^2}}
+\label{eq:pearson2}
+\end{equation}
+where \emph{U} is the set of users who have rated both items, $r_{u,i}$ is the rating given to item \emph{i} by user \emph{u}, and $\bar{r}_i$ is the mean rating given to item \emph{i}.
+
+The strength of CF is that the recommendation process is independent of the item features \parencite{Burke2002331}. On the other hand, CF would not be a suitable technique when the user-item matrix is sparse. Moreover, CF considers only the most rated items, therefore, ignores the items in the long tail, and it is unable to handle the cold start problem. \parencite{Dai20141760}
+
+\subsubsection{The cold start problem}
+Recommendation process in CF might be difficult either for a user or an item with few ratings. \parencite{Burke2002331}
+
+\subsubsection{The long tail phenomenon}
+The \textit{long tail} items according to \textcite{Yin2012896} are referred to products with a low volume of sales but they can be more profitable than the popular items if they are recommended to the right consumers.
+
+\subsection{Content-based filtering}
+Content based (CB) filtering is based on the analysis of the features that describe the items. The recommendation component consists in matching up the attributes of the items that a user has already rated, usually referred as the \textit{user profile} \parencite{Lops2011}, against the attributes of previously unseen products to produce a list of \emph{top-N} recommendations. Figure~\ref{fig:cb} shows the architecture of content-based recommendation process.
+\begin{figure}[ht!]
+	\centering
+	\includegraphics[width=0.8\textwidth]{chapter2/content-based-recommender1.png}
+	\caption{Content-based filtering process \parencite{1_blogseagatesoftcom_2015}}
+	\label{fig:cb}
+\end{figure}
+
+One of the strengths of CB filtering is that recommendation process is entirely based on the attributes of the items, thus, the recommendations produced for each user are independent of the other users' information. Also, a CB recommender allows to recommend items that do not have any ratings, therefore, they can diminish the effects of cold-start problem. \parencite{Lops2011}
+
+\subsubsection{Limitations of CB filtering}
+One disadvantage of CB filtering is that personal reviews are not considered in the recommendation process, because this technique is limited to explicit representation of items~\parencite{1242}. Moreover, some representations limit the description to certain aspects only~\parencite{Lops2011}.
+
+Another limitation of CB might be the collection of external data due to restricted access, e.g., the Million Song Dataset \parencite{Bertin-Mahieux2011} does not provide audio data due to copyright restrictions\footnote{http://labrosa.ee.columbia.edu/millionsong/pages/can-i-contact-you-privately-get-audio} and some preview clips are not available in the 7digital UK music catalogue. 
+%Content-based methods build user profiles by analysing the users' rated items. Each profile is then processed to be correlated with another item, which has not been rated, to compute the interest of the user on this object. \parencite{Lops2011}
+
+\subsection{Item Representation}
+Items require an accurate description to achieve upstanding results for recommending items to users \parencite{1242}. In majority of the content-based filtering systems, item attributes are textual features extracted from web resources. \parencite{Lops2011}
+
+In our approach, we describe the songs in terms of n-dimensional vectors. Each dimension in the vector represents the probability of the song to belong to a music genre. The probability estimation is obtained from a music classifier implemented with a deep learning technique. The song representation process is illustrated in section~\ref{subsec:genre}.
+
+\subsection{User Modelling}
+``User modeling [sic] is a discipline that deals with both how information about the user can be acquired and used by an automated system.''~\parencite{recsys2012}
+
+Modelling a user profile consists of designing a structure for recording the interests which describe a user. There are several techniques for modelling a user profile: vector, connexion, ontology and multidimensional representation. \parencite{DBLP:journals/corr/abs-1305-1114}
+
+In our project, we model each user profile through EDAs by minimising a fitness function. The parameters of the fitness function are the rating and similarity values of each song that a user has listened. The user profile is also represented in a n-dimensional vector of probabilities of music genres. This process is illustrated in section~\ref{subsec:profile}
+
+\subsection{Hybrid recommender approaches}
+A hybrid recommender system is developed through the combination of the recommendation techniques mentioned in the previous sections. Usually, hybrid approaches boost the advantages of CF by considering the user's feedback and the advantages of CB by taking into account the item attributes.
+
+According to \textcite{Burke2002331}, there are the following combination methods to accomplish hybridisation:
+\begin{itemize}
+	\item \textbf{Weighted} method, where a single item recommendation is computed as a linear combination of the recommendation value from each technique involved. The weight assigned to each recommender can be adjusted by considering additional feedback from the user.
+	\item \textbf{Switching} method, where the hybrid system uses a criteria depending on the input data to switch between recommendation techniques implemented in the system.
+	\item \textbf{Mixed} method, where recommendations from several different types of recommender are presented simultaneously.
+	\item \textbf{Feature combination} method, where CF results are treated as additional attributes of a CB filtering recommender.
+	\item \textbf{Cascade} method, where one recommender refines the coarse recommendations set given by the first recommender. This method is more efficient than the weighted method, because the cascade implementation does not process every item at each stage.
+	\item \textbf{Feature augmentation} method, where the rating of an item from one recommender is used as an input feature of another recommendation technique.
+	\item \textbf{Meta-level} method, where a model generated for user's interest representation using one recommendation technique is used as the input of another recommender system. The advantage of this method is the performance of the second recommender that uses the compressed representation instead of sparse raw data.
+\end{itemize}
+
+The hybrid music recommender approach in this project can be considered as an implementation of a feature augmentation method and a meta-level method. First, user profiles are generated using the rating matrix and the song vector representation. Next, the model generated is the input of a CB recommender to produce \emph{top-N} recommendations. The general model of our hybrid recommender is shown in Figure~\ref{fig:generalhybrid}.
+\begin{figure}[ht!]
+	\centering
+	\includegraphics[width=\textwidth]{chapter2/General_model_hybrid_recommender.png}
+	\caption{General model of the hybrid recommender}
+	\label{fig:generalhybrid}
+\end{figure}
+
 %is based on a three-way aspect model \citep{Yoshii2008435}. Real item ratings are obtained through Last.fm API and spectral information are represented by convolutional deep belief networks (CDBN) features computed from items' spectrogram \citep{Lee20091096}.
 
+\section{Music Information Retrieval}
+Music Information Retrieval (MIR) \parencite{Casey2008668} is a field of research for better human understanding of music data in an effort to reduce the \textit{semantic gap} \parencite{Celma2006} between high-level musical information and low-level audio data. Applications of MIR include artist identification, genre classification and music recommender systems~\parencite{weston2012latent,Yoshii2008435}.
 
-\section{Music Information Retrieval}
-Music Information Retrieval (MIR) is an extend of audio signal processing for understanding the usefulness and applications of music data by using time-frequency representations or low-level features. Applications of MIR include artist identification, genre classification and music recommendation.
-\subsection{Musical genre classification}
-Music classification is one of the principal components for clustering audio tracks based on similarities between features of pieces of music. Automatic musical genre classification approach proposed by \citet{Tzanetakis2002293}, which uses GTZAN genre dataset\footnote{http://marsyas.info/downloads/datasets.html}, has been widely used in the past decade. Nonetheless, the GTZAN dataset has inaccuracies \citep{Sturm20127}, it still provides an useful baseline to compare musical genre classification systems.
+\subsection{Genre classification}
+Music classification is one of the main tasks in MIR for clustering audio tracks based on similarities between features of pieces of music. Automatic musical genre classification approach proposed by \textcite{Tzanetakis2002293}, which uses GTZAN genre dataset\footnote{http://marsyas.info/downloads/datasets.html}, has been widely used in the past decade. The GTZAN dataset consists of a total of 1,000 clips, corresponding to 100 examples for each of the 10 genres. The duration of each clip is 30 seconds.
 
-\subsection{Deep Learning}
-One of the aims of learning algorithms is to identify high-level features that help us make sense of an observed data., e.g. genre, mood or release time in a music library. However, it could be difficult to compute these abstract features directly from audio waveforms. Deep learning can solve the difficulty of extracting high-level representations by expressing them in terms of simpler features, e.g. spectrograms. Deep learning allows the computer to build complex concepts out of simpler concepts. \citep{Bengio-et-al-2015-Book}
+Although the GTZAN dataset has inaccuracies~\parencite{Sturm20127}, it still provides a useful baseline to compare genre classifiers.
 
-\citet{Sigtia20146959} examined and compared three implementations of deep neural networks to learn features for music genre classification, using Rectifier Linear Units (ReLUs), dropout regularisation and Hessian Free optimization.
+\subsection{Music recommender systems}
+\subsubsection{Collaborative retrieval music recommender}
+\textcite{weston2012latent} proposed a latent \textit{collaborative retrieval} algorithm using the \textit{Last.fm Dataset - 1K users}\footnote{http://www.dtic.upf.edu/$\sim$ocelma/MusicRecommendationDataset/lastfm-1K.html} dataset. For each (artist, song) tuple in the dataset, they computed the audio data using a 39-dimensional vector corresponding to 13 Mel Frequency Cepstral Coefficients (MFCCs), and their first and second derivatives. The vectors obtained are used to build up a dictionary using the K-means algorithm. Each audio frame is represented with a vector that contains the number of occurrences of a dictionary vector in the frame. The collaborative retrieval algorithm presents outperforming results compared with the Singular Value Decomposition (SVD) and Non-negative Matrix Factorization (NMF) methods used on collaborative filtering recommendation tasks. 
 
-\subsection{Convolutional Neural Networks}
-Convolutional Neural Networks (CNNs) are type of neural networks that uses convolution operation instead of matrix multiplication for processing data that has grid-like topology \citep{Bengio-et-al-2015-Book} such as images collection.
+\subsubsection{Hybrid music recommender}
+\textcite{Yoshii2008435} proposed a hybrid recommender system considering rating scores collected from Amazon.co.jp and acoustic features derived from the signals of musical pieces corresponding to Japanese CD singles that were ranked in weekly top-20 from April 2000 to December 2005. Acoustic features for each piece are represented as a \textit{bag-of-timbres}, i.e., a set of weights of polyphonic timbres, equivalent to a 13-dimensional MFCC representation. Bags of timbres are computed with a Gaussian Mixture Model, considering the same combination of Gaussians for all the pieces.
 
-\citet{NIPS2013_5004} used a convolutional network approach to predict latent factors from music audio in a content-based recommendation system.
+A three-way aspect model (see Figure~\ref{fig:threeway}) is used to decompose the joint probability of users \emph{U}, pieces \emph{M} and features \emph{T} into a set of latent genre variables \emph{Z}. It is assumed that a user \emph{u} stochastically chooses a genre \emph{z} according to their preferences and then the genre \emph{z} stochastically generates a piece of music \emph{m} and an acoustic feature \emph{t}.
+\begin{figure}[ht!]
+	\centering
+	\includegraphics[width=0.5\textwidth]{chapter2/three_way_aspect_model.png}
+	\caption{Three-way aspect model~\parencite{Yoshii2008435}}
+	\label{fig:threeway}
+\end{figure}
+
+The results of the comparative experiments revealed that three-way aspect hybrid method outperformed the CF and CB recommendation techniques in terms of  accuracy considering $\vert T\vert=64$ features and $\vert Z\vert=10$ latent variables.
+
+\section{Deep Learning}
+High-level features that help us make sense of observed data, e.g. genre, mood or release time in a music library, could be difficult to compute. Deep learning algorithms allow us to build complex concepts out of simpler concepts \parencite{Bengio-et-al-2015-Book}. Deep learning can solve the difficulty of representing high-level features, e.g., perceived genre in a piece of music, by expressing them in terms of low-level signal features, e.g. spectrum, frequency or pitch.
+
+In MIR, deep learning methods capture the attention of researchers for the following reasons~\parencite{kereliuk15}:
+\begin{itemize}
+	\item Hierarchical representations of structures in data.
+	\item Efficient feature learning and classification algorithms.
+	\item Open and publicly available implementations, e.g., \textit{Theano}~\parencite{Bastien-Theano-2012, bergstra+al:2010-scipy} library for Python.
+\end{itemize}
+
+These advantages of deep learning methods enable us to learn abstractions from music low-level content in order to reduce the \textit{semantic gap}~\parencite{Celma2006} in MIR. Additionally, feature extraction does not require significant domain knowledge compared to \textit{hand-crafted} engineering. Nonetheless, deep learning implementations require a lot of data.
+
+\subsection{Deep Neural Networks}
+A deep neural network (DNN) \parencite{hinton2012deep} is defined as a feed-forward artificial neural network with more than one layer of hidden units between the input and the output layer (see Figure~\ref{fig:dnn}).
+\begin{figure}[ht!]
+	\centering
+	\includegraphics[width=\textwidth]{chapter2/dnn.png}
+	\caption{Schematic representation of a deep neural network~\parencite{1_brown_2014}}
+	\label{fig:dnn}
+\end{figure}
+
+Each hidden unit \emph{j} maps its total input from the layer below $x_j$, given by Equation~\eqref{eq:hiddenunit} 
+\begin{equation}
+x_j =b_j+\sum_{i}^{}y_iw_{ij}
+\label{eq:hiddenunit}
+\end{equation}
+where $b_j$ is the bias of unit \emph{j}, \emph{i} is an index over units in the
+layer below, and $w_{ij}$ is the weight on a connection to unit \emph{j}
+from unit \emph{i} in the layer below, to a scalar value $y_j$ that is directed to the layer above. The activation function of hidden units can be hyperbolic tangent, logistic or rectifier linear activation function. For classification, output unit \emph{j} converts its total input $x_j$ into a class probability $p_j$ by using the \textit{softmax} nonlinearity, given by Equation~\eqref{eq:softmax}
+\begin{equation}
+p_j =\frac{\exp x_j}{\sum_{k}^{}\exp x_k}
+\label{eq:softmax}
+\end{equation}
+where the index \emph{k} in the denominator runs over all output classes.
+\subsubsection{Music Feature Learning}
+\textcite{Sigtia20146959} examined and compared deep neural networks to discover features from the GTZAN dataset and the ISMIR 2004 genre classification dataset\footnote{http://ismir2004.ismir.net/genre\_contest/}, using rectifier linear units (ReLUs) and dropout regularisation. The GTZAN dataset was divided into four 50/25/25 train, validation, test parts.
+
+For each audio clip, they calculated the Fast Fourier Transform (FFT) on frames of length 1024 samples (22050 kHz sampling rate) with a window overlap of 50\%. Next, they used the magnitude of each FFT frame resulting in a 513 dimensional vector. And then, each feature dimension is normalised to have zero mean and unit standard deviation.
+
+For the deep neural network, the 500 hidden units were trained with stochastic gradient descent (SGD) with a learning rate of 0.01, a patience of 10 and a dropout rate of 0.25.
+
+The system classifies the GTZAN data with an accuracy of 83$\pm$1.1\%, a value of the same order of results obtained with hand-crafted features.
+
+\subsection{Convolutional Deep Neural Networks}
+A convolutional deep neural network (CDNN)~\parencite{Bengio-et-al-2015-Book} is a type of artificial neural network that uses convolution operation instead of matrix multiplication for processing data that has grid-like topology, designed to recognize visual patterns directly from pixel images. LeNet-5~\parencite{1_lecun_2015} (see Figure~\ref{fig:lenet}) is one model of convolutional network designed for recognition of handwritten and machine-printed characters.  
+
+\begin{figure}[h!]
+	\centering
+	\includegraphics[width=1\textwidth]{chapter2/mylenet.png}
+	\caption{Convolutional deep neural network LeNet model \parencite{1_deeplearning.net_2015}}
+\label{fig:lenet}
+\end{figure}
+
+\subsubsection{Deep content-based music recommendation}
+\textcite{NIPS2013_5004} used a CDNN to predict latent vectors for each audio track in a content-based recommendation system.
+
+The latent vectors are obtained by applying weighted matrix factorisation to the user-item play count data, and a CNN with an architecture similar to those used in computer vision is trained to predict these vectors from audio, demonstrating better performance than a bag-of-words approach based on MFCC attributes.
 
 \section{Estimation of Distribution Algorithms}
-Estimation of distribution algorithms (EDAs) \citep{pelikan2015estimation} are optimisation techniques by constructing a probabilistic model from a sample of solutions, generating a new population and leading to an optimal solution \citep{Santana:Bielza:Larrañaga:Lozano:Echegoyen:Mendiburu:Armañanzas:Shakya:2009:JSSOBK:v35i07}.
+Estimation of distribution algorithms (EDAs) \parencite{pelikan2015estimation} are optimisation techniques that construct a probabilistic model from a sample of solutions and generate a new population from it, leading to an optimal solution \parencite{Santana:Bielza:Larrañaga:Lozano:Echegoyen:Mendiburu:Armañanzas:Shakya:2009:JSSOBK:v35i07}.
 
-\citet{Liang2014781} exploited an EDA to model user profiles by using weighted featured vectors of keywords from a set of items that the user had rated above a threshold.
+\textcite{Liang2014781} exploited an EDA to model user profiles by using weighted featured vectors of keywords from a set of items that the user had rated above a threshold.
 
-These algorithms were applied in complex problems such as load balancing for mobile networks \citep{Hejazi15} or software reliability prediction 
-\\
-\\
-In this chapter, previous work on recommender systems has been reviewed and novelty techniques for representing acoustical features and for modelling user profiles has been presented. The next step is to implement the algorithms to collect the dataset by crawling online social information, to extract the acoustical features of a collection of songs for representing them as vectors, to model the user profiles by an EDA, and therefore, to return predicted recommendations.
\ No newline at end of file
+These algorithms have been applied to complex problems such as load balancing for mobile networks \parencite{Hejazi15} or software reliability prediction.
+
+\section{Summary}
+In this chapter, previous work on recommender systems has been reviewed and novel techniques for representing acoustic features and for modelling user profiles have been presented. The next steps are to collect the dataset by crawling online social information, to extract the acoustic features of a collection of songs to represent them as n-dimensional vectors, to model the user profiles by using EDAs, and therefore, to return a list of song recommendations.
\ No newline at end of file
Binary file Report/chapter2/content-based-recommender1.png has changed
Binary file Report/chapter2/dnn.png has changed
Binary file Report/chapter2/item-item1.jpg has changed
Binary file Report/chapter2/mylenet.png has changed
Binary file Report/chapter2/three_way_aspect_model.png has changed
Binary file Report/chapter2/user-user1.jpg has changed
Binary file Report/chapter2/user_item_data.png has changed
--- a/Report/chapter3/ch3.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter3/ch3.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -1,45 +1,67 @@
 \chapter{Methodology}
-The methodology used to develop the hybrid music recommender consists of three main stages. First, the collection of real users' data corresponding to  the number of playings of specific songs and the retrieval of audio samples of the identified songs in the users' data. Secondly, the implementation of the deep learning algorithm to represent the songs as vectors and the EDA to model the user profiles  
+The methodology used to develop our hybrid music recommender consists of three main stages. First, the collection of real world user-item data corresponding to the play counts of specific songs and the fetching of audio clips of the unique identified songs in the dataset. Secondly, the implementation of the deep learning algorithm to represent the audio clips in terms of music genre probabilities as n-dimensional vectors. Finally, we investigate estimation of distribution algorithms to model user profiles based on the rated songs above a threshold. Every stage of our hybrid recommender is entirely done in Python 2.7\footnote{https://www.python.org/download/releases/2.7/}.
+
 \section{Data collection}
-The Million Song Dataset \citep{Bertin-Mahieux2011} is a collection of audio features and metadata for a million contemporary popular music tracks which purpose in MIR is to provide a ground truth for evaluation research. This collection is also complemented by the Taste Profie subset \footnote{http://labrosa.ee.columbia.edu/millionsong/tasteprofile} which provides 48,373,586 triplets that consists of Last.fm user ID, Echo Nest song ID and play count of the song.
+The Million Song Dataset \parencite{Bertin-Mahieux2011} is a collection of audio features and metadata for a million contemporary popular music tracks which provides ground truth for evaluation research in MIR. This collection is also complemented by the Taste Profile subset which provides 48,373,586 triplets, each of which consists of an anonymised user ID, an Echo Nest song ID and a play count. We choose this dataset because it is publicly available and it contains enough data for user modelling and recommender evaluation.
 
 \subsection{Taste Profile subset cleaning}
-Due to potential mismatches between song IDs and track IDs on the Echo Nest database, it is required to filter out the wrong matches in the Taste Profile subset. A Python script is implemented to discard the triplets that contain the song ID values from the mismatches list available also on the Million Song Dataset webpage. The resulting triplets are stored in a new CSV file.
+Due to potential mismatches\footnote{http://labrosa.ee.columbia.edu/millionsong/blog/12-2-12-fixing-matching-errors} between song ID and track ID on the Echo Nest database, it is required to filter out the wrong matches in the Taste Profile subset. The cleaning process is illustrated in Figure~\ref{fig:taste_profile}.
+
+\begin{figure}[h!]
+	\centering
+	\includegraphics[width=1\textwidth]{chapter3/taste_profile.png}
+	\caption{Cleaning of the taste profile subset}
+	\label{fig:taste_profile}
+\end{figure}
+%Please see figure ~\ref{fig:JobInformationDialog} 
+
+A script is implemented to discard the triplets that contain the song identifiers from the mismatches text file. First, we load the file to read each line of it to obtain song identifier. The identifiers are stored as elements of a set object to construct a collection of unique elements. Next, due to the size of the Taste Profile subset (about 3 GB, uncompressed), we load the dataset by chunks of 20,000 triplets in a \textit{pandas}\footnote{http://pandas.pydata.org/} dataframe to clean each chunk by discarding the triplets that contains the song identifiers in the set object of the previous step. The cleaning process takes around 2.47 minutes and we obtain 45,795,100 triplets. 
+
+In addition to the cleaning process, we reduce significantly the size of the dataset for experimental purposes. We only consider users with more than 1,000 played songs and select the identifiers of 1,500 most played songs. This additional process takes around 3.23 minutes and we obtain 65,327 triplets.
+
 %count resulting number of triplets
-
-
 %At this stage, similarities between users is calculated to form a neighbourhood and predict user rating based on combination of the ratings of selected users in the neighbourhood.
 
-\subsection{Audio clips retrieval}
-The list of songs IDs from the triplets obtained in the last step are used to retrieve the track IDs through a Python script that includes the Pyechonest \footnote{http://echonest.github.io/pyechonest/} package which allow us to acquire track ID with \emph{get\_tracks} method through Echo Nest API\footnote{http://developer.echonest.com} requests. The reason behind obtaining track IDs is because for each ID we can retrieve a 30-60 seconds preview audio clips through 7digital API\footnote{http://developer.7digital.com}.
+\subsection{Fetching audio data}
+First, each element of the list of 1,500 song identifiers obtained in the previous step is used to retrieve the corresponding Echo Nest track ID through a script using the \emph{get\_tracks} method from the \textit{Pyechonest}\footnote{http://echonest.github.io/pyechonest/} package, which allows us to acquire the track ID and preview URL for each song ID through the Echo Nest API. The reason behind this is that the 7digital API uses the Echo Nest track ID instead of the song ID to retrieve any data from its catalogue. If the track information of a song is not available, we skip to the next song ID.
 
-Additionally, the Python script accumulates the song ID, the URL, artist and song metadata of each track available in a text file. If the track for a song ID is not available, the script skips to the next song ID to retrieve information of it. The generated text file can be used to reduce more the triplets dataset from the last section. 
+Moreover, for each preview URL obtained in the previous step, we can fetch an audio clip of 30 to 60 seconds of duration through an OAuth request to the 7digital API. For this particular API request, we use the GET method of the request class from the \textit{python-oauth2}\footnote{https://github.com/jasonrubenstein/python\_oauth2} package, because every request requires a nonce, a timestamp and a signature method, and also the country parameter, e.g., 'GB' to access the UK catalogue. Before running the script, it is useful to check that the provided 7digital API keys and the country parameter are enabled in the \textit{OAuth 1.0 Signature Reference Implementation}\footnote{http://7digital.github.io/oauth-reference-page/} for 7digital.
+
+Additionally, the script accumulates the Echo Nest song identifier, track ID, artist name, song title and the 7digital preview audio URL for each downloaded track in a text file. If a preview audio clip is not available, the script skips to the next song ID. The generated text file is used to further reduce the triplets dataset from the previous section. 
 
 %include number of tracks available
 
 %Classifier creates a model for each user based on the acoustic features of the tracks that user has liked.
 \subsection{Intermediate time-frequency representation for audio signals}
-For representing audio waveforms of the song collection obtained through 7digital API, a similar procedure suggested by \citet{NIPS2013_5004} is followed:
+For representing audio waveforms of the song collection obtained through 7digital API, a similar procedure suggested by \textcite{NIPS2013_5004} is followed:
 \begin{itemize}
 	\item Read 3 seconds of each song at a sampling rate of 22050 Hz and mono channel.
 	\item Compute log-mel spectrograms with 128 components from windows of 1024 frames and a hop size of 512 samples.
 \end{itemize}
 
-The Python script for feature extraction implemented by \citet{Sigtia20146959} is modified to return the log-mel spectrograms by using the LibROSA\footnote{https://bmcfee.github.io/librosa/index.html} package.
+The Python script for feature extraction implemented by \textcite{Sigtia20146959} is modified to return the log-mel spectrograms by using the LibROSA\footnote{https://bmcfee.github.io/librosa/index.html} package.
 
-``Representations of music directly from the temporal or spectral domain can be very sensitive to small time and frequency deformations''. \citep{zhang2014deep}
+``Representations of music directly from the temporal or spectral domain can be very sensitive to small time and frequency deformations''. \parencite{zhang2014deep}
 \section{Data preprocessing}
 \begin{itemize}
-	\item Rating complementary cumulative distribution
-	\item Flatenning spectrogram Sidsig
+	\item Rating from complementary cumulative distribution.
+	\item Flatenning spectrogram.
 \end{itemize}
 \section{Algorithms}
-\subsection{CNN architecture}
+\subsection{Music genre classifier}
+\label{subsec:genre}
 The input of the CNN consist of the 128-component spectrograms obtained in feature extraction. The batch size considered is 20 frames.
 Each convolutional layer consists of 10 kernels and ReLUs activation units. In the first convolutional layer the pooling size is 4 and in the second layer the pooling size is 2. The filters analyses the frames along the frequency axis to consider every Mel components with a hop size of 4 frames in the time axis. Additionally, there is a hidden multi perceptron layer with 513 units.
 %Deep belief network is a probabilistic model that has one observed layer and several hidden layers.
-\subsubsection{Genre classification}
+\subsubsection{CNN network architecture}
 The classification of genre for each frame is returned by negative log likelihood estimation of a logistic stochastic gradient descent (SGD) layer.
-\subsection{Continuous Bayesian EDA}
-\subsection{EDA-based hybrid recommender}
 
+In our testing, we obtained a classification error of 38.8 \% after 9 trials using the GTZAN dataset. More details of the classification results are shown in Table \ref{table:genre}.
+
+
+\subsection{User profile modelling}
+\label{subsec:profile}
+\subsubsection{Permutation EDA}
+\subsubsection{Continuous Univariate Marginal Distribution Algorithm}
+
+\subsection{Song recommendation}
\ No newline at end of file
Binary file Report/chapter3/taste_profile.png has changed
--- a/Report/chapter4/evaluation.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter4/evaluation.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -1,6 +1,8 @@
 \chapter{Experiments}
 
-In order to evaluate the performance of a recommender system, there are several scenarios to be considered depending on the structure of the dataset and the prediction accuracy. It is therefore necessary to determine a suitable experiment for evaluation of the proposed hybrid music recommendation system that employs an user-item matrix and vector representation for songs as inputs to predict ratings of items that an user has not previously listened to. In addition, the performance of the hybrid approach is compared with a pure content-based recommender algorithm. 
+In order to evaluate the performance of a recommender system, there are several scenarios to be considered depending on the structure of the dataset and the prediction accuracy. It is therefore necessary to determine a suitable experiment for the evaluation of our proposed hybrid music recommender that employs a rating matrix and vector representation of songs as inputs to produce \textit{top-N} song recommendations.
+
+In addition, the performance of our hybrid approaches is compared with a pure content-based recommender algorithm.
 
 %\section{Experiment aims}
 %deviation between the actual and predicted ratings is measured 
@@ -9,28 +11,50 @@
 \section{Evaluation for recommender systems}
 
 \subsection{Types of experiments}
+The scenarios for experiments require defining a hypothesis, controlling variables and generalising the results. Three types of experiments \parencite{export:115396} can be used to compare and evaluate recommender algorithms:
+The scenarios for experiments requires to define an hypothesis, controlling variables and generalization of the results. Three types of experiments \parencite{export:115396} can be used to compare and evaluate recommender algorithms:
 \begin{itemize}
 \item \textbf{Offline experiments:} where recorded historic data of users' ratings are used to simulate online users behaviour. The aim of this type of experiment is to refine approaches before testing with real users. On the other hand, results may have biases due to distribution of users.
 \item \textbf{User studies:} where test subjects interact with the recommendation system and its behaviour is recorded giving a large sets of quantitative measurements. One disadvantage of this type of experiment is to recruit subjects that represent the population of the users of the real recommendation system.
 \item \textbf{Online evaluation:} where the designer of the recommender application expect to influence the users' behaviour. Usually, this type of evaluation are run after extensive offline studies.
 \end{itemize}
 
-Besides, evaluation of recommender systems can be classified \citep{1242} in:
+\subsection{Evaluation strategies}
+On the other hand, evaluation of recommender systems can be classified \parencite{1242} in:
 \begin{itemize}
-\item \textbf{System-centric} process has been extensively exploited in CF systems. The accuracy of recommendations is based exclusively on users' dataset.
+\item \textbf{System-centric} process has been extensively exploited in CF systems. The accuracy of recommendations is based exclusively on users' dataset and is evaluated through predictive accuracy, decision based and rank based metrics.
 \item \textbf{Network-centric} process examines other components of the recommendation system, such as diversity of recommendations, and they are measured as a complement of the metrics of system-centric evaluation.
 \item \textbf{User-centric:} The perceived quality and usefulness of recommendations for the users are measured via provided feedback.
 \end{itemize}
 
+\subsection{Decision based metrics}
+Our hybrid recommender produces a list of songs for each user; hence, it is necessary to evaluate the recommendations with metrics derived from the \textit{confusion matrix}, which reflects the categorisation of test items as true positives (TP), false positives (FP), true negatives (TN) and false negatives (FN). In this project we consider the following metrics \parencite{1242}:
+\subsubsection{Precision}
+\begin{equation}
+Precision = \frac{TP} {TP+FP}\label{eq:1}
+\end{equation}
+%Text \eqref{eq:1}
+\subsubsection{Recall}
+\begin{equation}
+Recall = \frac{TP} {TP+FN}\label{eq:2}
+\end{equation}
+%Text \eqref{eq:2}
+\subsubsection{F1}
+\begin{equation}
+F1 = \frac{2 \times Precision \times Recall} {Precision+Recall}\label{eq:3}
+\end{equation}
+\subsubsection{Accuracy}
+\begin{equation}
+Accuracy = \frac{TP+TN} {TP+FP+TN+FN}\label{eq:4}
+\end{equation}
+
 \section{Evaluation method}
-The hybrid music recommender system proposed in this project is evaluated through an offline experiment and the results are presented with system-centric metrics.
+The hybrid music recommender system proposed in this project is evaluated through an offline experiment and the results are presented with decision based metrics described in the previous section.
 
-\subsection{Dataset description}
-For the purpose of evaluation of the hybrid recommender system, a sample from the Taste Profile subset is used because the data format includes user-item ratings and it is publicly available. A 10-fold cross validation is performed which splits the data set in 90\% for training and 10\% for testing.
+\subsection{Training set and test set}
+For the purpose of evaluating and comparing the performance, we use a random sample of 20 \% of the total size of the cleaned Taste Profile subset for testing, and the remaining 80 \% is used to train both the hybrid recommender approach and the content-based recommender baseline. The sampling process is repeated ten times.
 
-\subsection{Evaluation measures}
-Because the dataset does not include explicit ratings, hence, the number of plays of tracks are considered as users' behaviours,
+%\subsection{Evaluation measures}
+%Because the dataset does not include explicit ratings, hence, the number of plays of tracks are considered as users' behaviours,
 
 
 
--- a/Report/chapter5/results.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter5/results.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -5,10 +5,10 @@
 	%\caption{\label{fig:frog1}This is a figure caption.}
 %\end{minipage}
 
-
-fadslkfjdsalfjdsalf
-\begin{table}[tbp]
-	\caption{Genre classification Results} % title of Table
+\section{Genre classification results}
+A total of 9 trials were executed to train, validate and test the CDNN for genre classification using a 3 second frame of each file of the GTZAN dataset. We obtained the results shown in Table~\ref{table:genre}.
+\begin{table}[h!]
+	\caption{Genre classification results} % title of Table
 	\centering % used for centering table
 	\begin{tabular}{c c c c c} % centered columns (4 columns)
 		\hline\hline %inserts double horizontal lines
@@ -26,11 +26,62 @@
 		9 & 34.0 & 38.8 & 850 & 9.14 \\ [1ex] % [1ex] adds vertical space
 		\hline %inserts single line
 	\end{tabular}
-	\label{table:nonlin} % is used to refer this table in the text
+	\label{table:genre} % is used to refer this table in the text
 \end{table}
-el mindsafa se va a cabarakl;dflakdjfl;akds
-dflk;djflkajflkajf
-jlkd;fjlk;ajdlf;kajsld
 
+For the initial trial, the error is higher because the weight and bias values for each unit of the layers in the deep learning classifier are randomly initialised.
 
-jkdl;fkaj
\ No newline at end of file
+\section{Recommender evaluation results}
+\begin{table}[h!]
+	\caption{Evaluation of recommender systems (N=5)} % title of Table
+	\centering % used for centering table
+	\begin{adjustbox}{max width=\textwidth}
+	\begin{tabular}{c c c c c} % centered columns (4 columns)
+		\hline\hline %inserts double horizontal lines
+		Recommender & Precision & Recall & F1 & Accuracy \\ [0.5ex] % inserts table
+		%heading
+		\hline % inserts single horizontal line
+		Content-based (baseline) & 0.275 $\pm$ 0.087 & 0.010 $\pm$ 0.003 & 0.020 $\pm$ 0.007 & 0.681 $\pm$ 0.008 \\ % inserting body of the table
+		Hybrid (permutation EDA) & \textbf{0.391 $\pm$ 0.182} & \textbf{0.013 $\pm$ 0.007} & \textbf{0.025 $\pm$ 0.013} & \textbf{0.685 $\pm$ 0.009} \\
+		Hybrid (continuous UMDA) & 0.318 $\pm$ 0.142 & 0.011 $\pm$ 0.005 & 0.021 $\pm$ 0.011 & 0.683 $\pm$ 0.009 \\ [1ex] % [1ex] adds vertical space
+		\hline %inserts single line
+	\end{tabular}
+	\end{adjustbox}
+	\label{table:recn5} % is used to refer this table in the text
+\end{table}
+
+\begin{table}[h!]
+	\caption{Evaluation of recommender systems (N=10)} % title of Table
+	\centering % used for centering table
+	\begin{adjustbox}{max width=\textwidth}
+		\begin{tabular}{c c c c c} % centered columns (4 columns)
+			\hline\hline %inserts double horizontal lines
+			Recommender & Precision & Recall & F1 & Accuracy \\ [0.5ex] % inserts table
+			%heading
+			\hline % inserts single horizontal line
+			Content-based (baseline) & 0.301 $\pm$ 0.059 & 0.022 $\pm$ 0.007 & 0.041 $\pm$ 0.012 & 0.678 $\pm$ 0.007 \\ % inserting body of the table
+			Hybrid (permutation EDA) & \textbf{0.370 $\pm$ 0.073} & \textbf{0.024 $\pm$ 0.007} & \textbf{0.045 $\pm$ 0.013} & \textbf{0.682 $\pm$ 0.009} \\
+			Hybrid (continuous UMDA) & 0.309 $\pm$ 0.100 & 0.019 $\pm$ 0.007 & 0.036 $\pm$ 0.013 & 0.679 $\pm$ 0.009 \\ [1ex] % [1ex] adds vertical space
+			\hline %inserts single line
+		\end{tabular}
+	\end{adjustbox}
+	\label{table:recn10} % is used to refer this table in the text
+\end{table}
+
+\begin{table}[h!]
+	\caption{Evaluation of recommender systems (N=20)} % title of Table
+	\centering % used for centering table
+	\begin{adjustbox}{max width=\textwidth}
+		\begin{tabular}{c c c c c} % centered columns (4 columns)
+			\hline\hline %inserts double horizontal lines
+			Recommender & Precision & Recall & F1 & Accuracy \\ [0.5ex] % inserts table
+			%heading
+			\hline % inserts single horizontal line
+			Content-based (baseline) & 0.281 $\pm$ 0.052 & 0.041 $\pm$ 0.006 & 0.071 $\pm$ 0.010 & 0.666 $\pm$ 0.006 \\ % inserting body of the table
+			Hybrid (permutation EDA) & \textbf{0.363 $\pm$ 0.041} & \textbf{0.047 $\pm$ 0.008} & \textbf{0.084 $\pm$ 0.014} & \textbf{0.676 $\pm$ 0.007} \\
+			Hybrid (continuous UMDA) & 0.302 $\pm$ 0.067 & 0.039 $\pm$ 0.011 & 0.070 $\pm$ 0.019 & 0.671 $\pm$ 0.010 \\ [1ex] % [1ex] adds vertical space
+			\hline %inserts single line
+		\end{tabular}
+	\end{adjustbox}
+	\label{table:recn20} % is used to refer this table in the text
+\end{table}
\ No newline at end of file
--- a/Report/chapter6/conclusions.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chapter6/conclusions.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -1,3 +1,10 @@
 \chapter{Conclusion}
+Data is not strong enough
+\section{Future work}
 
-\section{Future work}
\ No newline at end of file
+\begin{itemize}
+	\item Download more audio data from 7digital catalog.
+	\item Consider another high-level representation instead of music genres or extend the number of genres.
+	\item Predict rating values of items from users' neighbourhood to evaluate the performance of our hybrid recommender and compare it with a traditional collaborative filtering baseline.
+	\item Optimisation of the profile using latent vectors.
+\end{itemize}
\ No newline at end of file
--- a/Report/chiliguano_msc_finalproject.blg	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.blg	Sun Aug 30 15:49:27 2015 +0100
@@ -1,6 +1,7 @@
 This is BibTeX, Version 0.99d (TeX Live 2015)
 Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
 The top-level auxiliary file: chiliguano_msc_finalproject.aux
+The style file: biblatex.bst
 A level-1 auxiliary file: acknowledgements/acknowledgements.aux
 A level-1 auxiliary file: abstract/abstract.aux
 A level-1 auxiliary file: chapter1/introduction.aux
@@ -9,50 +10,356 @@
 A level-1 auxiliary file: chapter4/evaluation.aux
 A level-1 auxiliary file: chapter5/results.aux
 A level-1 auxiliary file: chapter6/conclusions.aux
-The style file: agsm.bst
-Database file #1: references.bib
-Warning--empty institution in export:115396
-Warning--empty booktitle in Sigtia20146959
-Warning--empty booktitle in Sturm20127
-You've used 18 entries,
-            2909 wiz_defined-function locations,
-            784 strings with 9691 characters,
-and the built_in function-call counts, 85694 in all, are:
-= -- 13970
-> -- 1179
-< -- 2
-+ -- 6696
-- -- 359
-* -- 7452
-:= -- 16983
-add.period$ -- 24
-call.type$ -- 18
-change.case$ -- 250
-chr.to.int$ -- 18
-cite$ -- 21
-duplicate$ -- 338
-empty$ -- 664
-format.name$ -- 431
-if$ -- 15496
-int.to.chr$ -- 5
-int.to.str$ -- 0
-missing$ -- 16
-newline$ -- 76
-num.names$ -- 192
-pop$ -- 89
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated wiz_functions (elt_size=4) to 6000 items from 3000.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated field_info (elt_size=4) to 11116 items from 5000.
+Database file #1: chiliguano_msc_finalproject-blx.bib
+Database file #2: references.bib
+Warning--I'm ignoring Putzke2014519's extra "keywords" field
+--line 208 of file references.bib
+Warning--I'm ignoring Putzke2014519's extra "keywords" field
+--line 209 of file references.bib
+Warning--I'm ignoring Putzke2014519's extra "keywords" field
+--line 210 of file references.bib
+Biblatex version: 3.0
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2513 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2517 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2523 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2527 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2531 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2537 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2541 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2595 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2595 of file biblatex.bst
+Reallocated singl_function (elt_size=4) to 100 items from 50.
+Reallocated wiz_functions (elt_size=4) to 9000 items from 6000.
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Blog.seagatesoft.com," has a comma at the end for entry 1_blogseagatesoftcom_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Deeplearning.net," has a comma at the end for entry 1_deeplearning.net_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+Name 1 in "Hypebot.com," has a comma at the end for entry 1_hypebot.com_2015
+while executing---line 2659 of file biblatex.bst
+You've used 44 entries,
+            6047 wiz_defined-function locations,
+            1447 strings with 25348 characters,
+and the built_in function-call counts, 155530 in all, are:
+= -- 5278
+> -- 6585
+< -- 1386
++ -- 3193
+- -- 3553
+* -- 12948
+:= -- 10546
+add.period$ -- 0
+call.type$ -- 44
+change.case$ -- 635
+chr.to.int$ -- 191
+cite$ -- 86
+duplicate$ -- 17340
+empty$ -- 15936
+format.name$ -- 3370
+if$ -- 33222
+int.to.chr$ -- 0
+int.to.str$ -- 100
+missing$ -- 0
+newline$ -- 1536
+num.names$ -- 1909
+pop$ -- 14117
 preamble$ -- 1
-purify$ -- 270
+purify$ -- 789
 quote$ -- 0
-skip$ -- 286
+skip$ -- 8181
 stack$ -- 0
-substring$ -- 19771
-swap$ -- 83
-text.length$ -- 254
-text.prefix$ -- 0
-top$ -- 0
-type$ -- 70
-warning$ -- 3
-while$ -- 406
+substring$ -- 3007
+swap$ -- 6157
+text.length$ -- 1287
+text.prefix$ -- 43
+top$ -- 1
+type$ -- 1651
+warning$ -- 0
+while$ -- 947
 width$ -- 0
-write$ -- 271
-(There were 3 warnings)
+write$ -- 1491
+(There were 144 error messages)
Binary file Report/chiliguano_msc_finalproject.dvi has changed
--- a/Report/chiliguano_msc_finalproject.lof	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.lof	Sun Aug 30 15:49:27 2015 +0100
@@ -1,6 +1,31 @@
+\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax 
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.1}{\ignorespaces Collaborative filtering process \parencite {sarwar2001item}\relax }}{9}{figure.caption.5}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.2}{\ignorespaces User based collaborative filtering \parencite {1_siddharths_blog_2013}\relax }}{9}{figure.caption.6}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.3}{\ignorespaces Item based collaborative filtering \parencite {1_siddharths_blog_2013}\relax }}{9}{figure.caption.7}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.4}{\ignorespaces Content-based filtering process \parencite {1_blogseagatesoftcom_2015}\relax }}{11}{figure.caption.10}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.5}{\ignorespaces Content-based filtering process\relax }}{15}{figure.caption.12}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.6}{\ignorespaces Three-way aspect model\nobreakspace {}\parencite {Yoshii2008435}\relax }}{17}{figure.caption.15}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.7}{\ignorespaces Schematic representation of a deep neural network\nobreakspace {}\parencite {1_brown_2014}\relax }}{19}{figure.caption.16}
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {2.8}{\ignorespaces Convolutional deep neural network LeNet model \parencite {1_deeplearning.net_2015}\relax }}{21}{figure.caption.18}
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
+\defcounter {refsection}{0}\relax 
+\contentsline {figure}{\numberline {3.1}{\ignorespaces Cleaning of the taste profile subset\relax }}{24}{figure.caption.20}
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
+\defcounter {refsection}{0}\relax 
 \addvspace {10\p@ }
Binary file Report/chiliguano_msc_finalproject.pdf has changed
Binary file Report/chiliguano_msc_finalproject.synctex.gz has changed
--- a/Report/chiliguano_msc_finalproject.tex	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -13,12 +13,12 @@
 % * check through for question mark errors in render
 % * make sure the bibliog doesn't have ugly urls in
 
-\usepackage{ifdraft}
+%\usepackage{ifdraft}
 \usepackage{amsmath}
 \usepackage{amsfonts}
 \usepackage{amssymb}
-\usepackage{natbib}
-\usepackage{har2nat}
+%\usepackage{natbib}
+%\usepackage{har2nat}
 \usepackage{rotating}
 \usepackage[breaklinks]{hyperref}
 \usepackage{subfig} % apparently subfig is the one to use not subfigure
@@ -28,12 +28,17 @@
 \usepackage{setspace}
 \usepackage[absolute]{textpos} 
 
+\usepackage{csquotes}
+\usepackage[style=authoryear-ibid, backend=bibtex]{biblatex}
+\addbibresource{references.bib}
+
+\usepackage{adjustbox}
 
 \begin{document}
 
-\setlength{\TPHorizModule}{200mm} 
-\setlength{\TPVertModule}{100mm} 
-\textblockorigin{61mm}{19mm}
+%\setlength{\TPHorizModule}{200mm} 
+%\setlength{\TPVertModule}{100mm} 
+%\textblockorigin{61mm}{19mm}
 	
 %%%%% thanks alex mclean for super-useful onscreen reading tip:
 %\usepackage[top=0.1in, bottom=0.1in, left=0.3in, right=0.3in, paperwidth=11in, paperheight=7in]{geometry} % activate for ONSCREEN reading shape AT HOME
@@ -48,7 +53,7 @@
 	%\vspace{0.5cm}
 	{\large \textbf{School of Electronic\\Engineering and\\Computer Science}}\vspace{16.5cm}\newline
 	%{\large School of Electronic\\Engineering and\\Computer Science}\vspace{15.7cm}\newline
-	\includegraphics[scale=0.15]{Logo}\vspace{0.5cm}
+	\includegraphics[scale=0.15]{Logo.png}\vspace{0.5cm}
 \end{minipage}
 \vline width 2pt
 \hspace{0.8cm}
@@ -96,7 +101,6 @@
 \listoffigures
 \listoftables
 
-% could also have a \listoftables, but this example doesn't include any
 
 
 % Start the main context
@@ -104,19 +108,15 @@
 %\mainmatter
 
 \include{chapter1/introduction}
-
 \include{chapter2/background}
-
 \include{chapter3/ch3}
-
 \include{chapter4/evaluation}
-
 \include{chapter5/results}
-
 \include{chapter6/conclusions}
 
-\bibliographystyle{agsm}
-\bibliography{references}
+%\bibliographystyle{agsm}
+\addcontentsline{toc}{chapter}{References}
+\printbibliography[title=References]
 
 %\backmatter
 
--- a/Report/chiliguano_msc_finalproject.toc	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.toc	Sun Aug 30 15:49:27 2015 +0100
@@ -1,39 +1,117 @@
+\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax 
+\defcounter {refsection}{0}\relax 
 \contentsline {chapter}{\numberline {1}Introduction}{1}{chapter.1}
+\defcounter {refsection}{0}\relax 
 \contentsline {section}{\numberline {1.1}Motivation}{2}{section.1.1}
-\contentsline {section}{\numberline {1.2}Aim}{3}{section.1.2}
-\contentsline {section}{\numberline {1.3}Outline of the thesis}{4}{section.1.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {1.2}Aims}{3}{section.1.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {1.3}Thesis outline}{4}{section.1.3}
+\defcounter {refsection}{0}\relax 
 \contentsline {chapter}{\numberline {2}Background}{5}{chapter.2}
+\defcounter {refsection}{0}\relax 
 \contentsline {section}{\numberline {2.1}Online Social Networks}{6}{section.2.1}
-\contentsline {subsection}{\numberline {2.1.1}Last.fm}{6}{subsection.2.1.1}
+\defcounter {refsection}{0}\relax 
 \contentsline {section}{\numberline {2.2}Music services platforms}{7}{section.2.2}
-\contentsline {subsection}{\numberline {2.2.1}Echonest}{7}{subsection.2.2.1}
-\contentsline {subsection}{\numberline {2.2.2}7Digital}{7}{subsection.2.2.2}
-\contentsline {section}{\numberline {2.3}Recommender Systems}{7}{section.2.3}
-\contentsline {subsection}{\numberline {2.3.1}Collaborative filtering}{7}{subsection.2.3.1}
-\contentsline {subsection}{\numberline {2.3.2}Content-based methods}{8}{subsection.2.3.2}
-\contentsline {section}{\numberline {2.4}Hybrid recommender methods}{8}{section.2.4}
-\contentsline {section}{\numberline {2.5}Music Information Retrieval}{8}{section.2.5}
-\contentsline {subsection}{\numberline {2.5.1}Musical genre classification}{8}{subsection.2.5.1}
-\contentsline {subsection}{\numberline {2.5.2}Deep Learning}{9}{subsection.2.5.2}
-\contentsline {subsection}{\numberline {2.5.3}Convolutional Neural Networks}{9}{subsection.2.5.3}
-\contentsline {section}{\numberline {2.6}Estimation of Distribution Algorithms}{10}{section.2.6}
-\contentsline {chapter}{\numberline {3}Methodology}{11}{chapter.3}
-\contentsline {section}{\numberline {3.1}Data collection}{11}{section.3.1}
-\contentsline {subsection}{\numberline {3.1.1}Taste Profile subset cleaning}{12}{subsection.3.1.1}
-\contentsline {subsection}{\numberline {3.1.2}Audio clips retrieval}{12}{subsection.3.1.2}
-\contentsline {subsection}{\numberline {3.1.3}Intermediate time-frequency representation for audio signals}{13}{subsection.3.1.3}
-\contentsline {section}{\numberline {3.2}Data preprocessing}{13}{section.3.2}
-\contentsline {section}{\numberline {3.3}Algorithms}{14}{section.3.3}
-\contentsline {subsection}{\numberline {3.3.1}CNN architecture}{14}{subsection.3.3.1}
-\contentsline {subsubsection}{Genre classification}{14}{section*.5}
-\contentsline {subsection}{\numberline {3.3.2}Continuous Bayesian EDA}{14}{subsection.3.3.2}
-\contentsline {subsection}{\numberline {3.3.3}EDA-based hybrid recommender}{14}{subsection.3.3.3}
-\contentsline {chapter}{\numberline {4}Experiments}{15}{chapter.4}
-\contentsline {section}{\numberline {4.1}Evaluation for recommender systems}{15}{section.4.1}
-\contentsline {subsection}{\numberline {4.1.1}Types of experiments}{15}{subsection.4.1.1}
-\contentsline {section}{\numberline {4.2}Evaluation method}{17}{section.4.2}
-\contentsline {subsection}{\numberline {4.2.1}Dataset description}{17}{subsection.4.2.1}
-\contentsline {subsection}{\numberline {4.2.2}Evaluation measures}{17}{subsection.4.2.2}
-\contentsline {chapter}{\numberline {5}Results}{18}{chapter.5}
-\contentsline {chapter}{\numberline {6}Conclusion}{20}{chapter.6}
-\contentsline {section}{\numberline {6.1}Future work}{20}{section.6.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {2.3}Recommender Systems}{8}{section.2.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.3.1}Collaborative filtering}{8}{subsection.2.3.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{The cold start problem}{10}{section*.8}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{The long tail phenomenon}{11}{section*.9}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.3.2}Content-based filtering}{11}{subsection.2.3.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Limitations of CB filtering}{12}{section*.11}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.3.3}Item Representation}{12}{subsection.2.3.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.3.4}User Modelling}{13}{subsection.2.3.4}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.3.5}Hybrid recommender approaches}{13}{subsection.2.3.5}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {2.4}Music Information Retrieval}{15}{section.2.4}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.4.1}Genre classification}{15}{subsection.2.4.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.4.2}Music recommender systems}{16}{subsection.2.4.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Collaborative retrieval music recommender}{16}{section*.13}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Hybrid music recommender}{16}{section*.14}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {2.5}Deep Learning}{18}{section.2.5}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.5.1}Deep Neural Networks}{18}{subsection.2.5.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Music Feature Learning}{20}{section*.17}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {2.5.2}Convolutional Deep Neural Networks}{20}{subsection.2.5.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Deep content-based music recommendation}{21}{section*.19}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {2.6}Estimation of Distribution Algorithms}{21}{section.2.6}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {2.7}Summary}{22}{section.2.7}
+\defcounter {refsection}{0}\relax 
+\contentsline {chapter}{\numberline {3}Methodology}{23}{chapter.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {3.1}Data collection}{23}{section.3.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.1.1}Taste Profile subset cleaning}{24}{subsection.3.1.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.1.2}Fetching audio data}{25}{subsection.3.1.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.1.3}Intermediate time-frequency representation for audio signals}{26}{subsection.3.1.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {3.2}Data preprocessing}{27}{section.3.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {3.3}Algorithms}{27}{section.3.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.3.1}Music genre classifier}{27}{subsection.3.3.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{CNN network architecture}{27}{section*.21}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.3.2}User profile modelling}{28}{subsection.3.3.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Permutation EDA}{28}{section*.22}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Continuous Univariate Marginal Distribution Algorithm}{28}{section*.23}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {3.3.3}Song recommendation}{28}{subsection.3.3.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {chapter}{\numberline {4}Experiments}{29}{chapter.4}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {4.1}Evaluation for recommender systems}{29}{section.4.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {4.1.1}Types of experiments}{29}{subsection.4.1.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {4.1.2}Evaluation strategies}{30}{subsection.4.1.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {4.1.3}Decision based metrics}{31}{subsection.4.1.3}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Precision}{31}{section*.24}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Recall}{31}{section*.25}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{F1}{31}{section*.26}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsubsection}{Accuracy}{31}{section*.27}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {4.2}Evaluation method}{32}{section.4.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {subsection}{\numberline {4.2.1}Training set and test set}{32}{subsection.4.2.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {chapter}{\numberline {5}Results}{33}{chapter.5}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {5.1}Genre classification results}{33}{section.5.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {5.2}Recommender evaluation results}{34}{section.5.2}
+\defcounter {refsection}{0}\relax 
+\contentsline {chapter}{\numberline {6}Conclusion}{35}{chapter.6}
+\defcounter {refsection}{0}\relax 
+\contentsline {section}{\numberline {6.1}Future work}{35}{section.6.1}
+\defcounter {refsection}{0}\relax 
+\contentsline {chapter}{References}{36}{section.6.1}
--- a/Report/references.bib	Wed Aug 26 02:00:48 2015 +0100
+++ b/Report/references.bib	Sun Aug 30 15:49:27 2015 +0100
@@ -1,3 +1,281 @@
+@online{1_lecun_2015,
+	author={LeCun, Yann},
+	title={MNIST Demos on Yann LeCun's website},
+	url={http://yann.lecun.com/exdb/lenet/},
+	urldate={2015-08-30},
+	journal={Yann.lecun.com},
+	year={2015}
+}
+@online{1_brown_2014,
+	author={Brown, Larry},
+	title={Accelerate Machine Learning with the cuDNN Deep Neural Network Library},
+	url={http://devblogs.nvidia.com/parallelforall/accelerate-machine-learning-cudnn-deep-neural-network-library/},
+	urldate={2015-08-30},
+	journal={Parallel Forall},
+	year={2014}
+}
+
+@article{hinton2012deep,
+	title={Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups},
+	author={Hinton, Geoffrey and Deng, Li and Yu, Dong and Dahl, George E and Mohamed, Abdel-rahman and Jaitly, Navdeep and Senior, Andrew and Vanhoucke, Vincent and Nguyen, Patrick and Sainath, Tara N and others},
+	journal={Signal Processing Magazine, IEEE},
+	volume={29},
+	number={6},
+	pages={82--97},
+	year={2012},
+	publisher={IEEE}
+}
+@article{weston2012latent,
+	title={Latent collaborative retrieval},
+	author={Weston, Jason and Wang, Chong and Weiss, Ron and Berenzweig, Adam},
+	journal={arXiv preprint arXiv:1206.4603},
+	year={2012}
+}
+
+@MISC{Bastien-Theano-2012,
+	author = {Bastien, Fr{\'{e}}d{\'{e}}ric and Lamblin, Pascal and Pascanu, Razvan and Bergstra, James and Goodfellow, Ian J. and Bergeron, Arnaud and Bouchard, Nicolas and Bengio, Yoshua},
+	title = {Theano: new features and speed improvements},
+	year = {2012},
+	howpublished = {Deep Learning and Unsupervised Feature Learning NIPS 2012 Workshop},
+	abstract = {Theano is a linear algebra compiler that optimizes a user’s symbolically-specified
+	mathematical computations to produce efficient low-level implementations. In
+	this paper, we present new features and efficiency improvements to Theano, and
+	benchmarks demonstrating Theano’s performance relative to Torch7, a recently
+	introduced machine learning library, and to RNNLM, a C++ library targeted at
+	recurrent neural networks.}
+}
+
+@INPROCEEDINGS{bergstra+al:2010-scipy,
+	author = {Bergstra, James and Breuleux, Olivier and Bastien, Fr{\'{e}}d{\'{e}}ric and Lamblin, Pascal and Pascanu, Razvan and Desjardins, Guillaume and Turian, Joseph and Warde-Farley, David and Bengio, Yoshua},
+	month = jun,
+	title = {Theano: a {CPU} and {GPU} Math Expression Compiler},
+	booktitle = {Proceedings of the Python for Scientific Computing Conference ({SciPy})},
+	year = {2010},
+	location = {Austin, TX},
+	note = {Oral Presentation},
+	abstract = {Theano is a compiler for mathematical expressions in Python that combines the convenience of NumPy’s syntax with the speed of optimized native machine language. The user composes mathematical expressions in a high-level description that mimics NumPy’s syntax and semantics, while being statically typed and
+	functional (as opposed to imperative). These expressions allow Theano to provide symbolic differentiation. Before performing computation, Theano optimizes the choice of expressions, translates
+	them into C++ (or CUDA for GPU), compiles them into dynamically loaded Python modules, all automatically. Common machine learning algorithms implemented with Theano are from 1.6× to 7.5× faster than competitive alternatives (including those implemented with C/C++, NumPy/SciPy and MATLAB) when compiled for the
+	CPU and between 6.5× and 44× faster when compiled for the GPU. This paper illustrates how to use Theano, outlines the scope of the compiler, provides benchmarks on both CPU and GPU processors, and explains its overall design.}
+}
+
+@article{kereliuk15,
+	title={Deep Learning and Music Adversaries},
+	author={Kereliuk, Corey and Sturm, Bob L and Larsen, Jan},
+	journal={arXiv preprint arXiv:1507.04761},
+	year={2015}
+}
+
+@article{DBLP:journals/corr/abs-1305-1114,
+	author    = {Djallel Bouneffouf},
+	title     = {Towards User Profile Modelling in Recommender System},
+	journal   = {CoRR},
+	volume    = {abs/1305.1114},
+	year      = {2013},
+	url       = {http://arxiv.org/abs/1305.1114},
+	timestamp = {Sun, 02 Jun 2013 20:48:21 +0200},
+	biburl    = {http://dblp.uni-trier.de/rec/bib/journals/corr/abs-1305-1114},
+	bibsource = {dblp computer science bibliography, http://dblp.org}
+}
+
+@ARTICLE{Yao2015453,
+	author={Yao, L. and Sheng, Q.Z. and Ngu, A.H.H. and Yu, J. and Segev, A.},
+	title={Unified collaborative and content-based web service recommendation},
+	journal={IEEE Transactions on Services Computing},
+	year={2015},
+	volume={8},
+	number={3},
+	pages={453-466},
+	doi={10.1109/TSC.2014.2355842},
+	art_number={6894179},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84932619562&partnerID=40&md5=4483a697e12fc53f620393586f85aebe},
+	document_type={Article},
+	source={Scopus},
+}
+@online{1_blogseagatesoftcom_2015,
+	author={{Blog.seagatesoft.com}},
+	title={Belajar Sistem Perekomendasi «  Corat-coret di Halaman Web},
+	url={http://blog.seagatesoft.com/2013/07/14/belajar-sistem-perekomendasi/},
+	urldate={2015-08-29},
+	year={2015}
+}
+
+@inproceedings{sarwar2001item,
+	title={Item-based collaborative filtering recommendation algorithms},
+	author={Sarwar, Badrul and Karypis, George and Konstan, Joseph and Riedl, John},
+	booktitle={Proceedings of the 10th international conference on World Wide Web},
+	pages={285--295},
+	year={2001},
+	organization={ACM}
+}
+
+@CONFERENCE{Hu2008263,
+	author={Hu, Y. and Volinsky, C. and Koren, Y.},
+	title={Collaborative filtering for implicit feedback datasets},
+	booktitle={Proceedings - IEEE International Conference on Data Mining, ICDM},
+	year={2008},
+	pages={263-272},
+	doi={10.1109/ICDM.2008.22},
+	art_number={4781121},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-67049164166&partnerID=40&md5=01238b08208962fd0fdcc7503fa3af99},
+	document_type={Conference Paper},
+	source={Scopus},
+}
+
+@online{2_the_economist_2005,
+	author={{The Economist}},
+	title={United we find},
+	url={http://www.economist.com/node/3714044},
+	urldate={2015-08-28},
+	year={2005}
+}
+
+@online{1_siddharths_blog_2013,
+	author={},
+	title={Recommendation Engine},
+	url={https://spatnaik77.wordpress.com/2013/07/17/recommendation-engine/},
+	urldate={2015-08-28},
+	year={2013}
+}
+
+@online{1_deeplearning.net_2015,
+	author={{Deeplearning.net}},
+	title={Convolutional Neural Networks (LeNet) — DeepLearning 0.1 documentation},
+	url={http://deeplearning.net/tutorial/lenet.html},
+	urldate={2015-08-28},
+	year={2015}
+}
+@ARTICLE{Casey2008668,
+	author={Casey, M.A. and Veltkamp, R. and Goto, M. and Leman, M. and Rhodes, C. and Slaney, M.},
+	title={Content-based music information retrieval: Current directions and future challenges},
+	journal={Proceedings of the IEEE},
+	year={2008},
+	volume={96},
+	number={4},
+	pages={668-696},
+	doi={10.1109/JPROC.2008.916370},
+	art_number={4472077},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-64649105397&partnerID=40&md5=2d8ec7231e10bc686566dd419ce47ae8},
+	document_type={Article},
+	source={Scopus},
+}
+
+@CONFERENCE{Celma2006,
+	author={Celma, O. and Herrera, P. and Serra, X.},
+	title={Bridging the music semantic gap},
+	booktitle={CEUR Workshop Proceedings},
+	year={2006},
+	volume={187},
+	page_count={12},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84884332226&partnerID=40&md5=d028cb2aca5d2d6d8f25a8a8b555edbf},
+	document_type={Conference Paper},
+	source={Scopus},
+}
+
+@online{1_spotify_press_2014,
+	author={},
+	title={Spotify Acquires The Echo Nest},
+	url={https://press.spotify.com/us/2014/03/06/spotify-acquires-the-echo-nest/},
+	urldate={2015-08-27},
+	year={2014}
+}
+@article{smith2009social,
+	title={The social media revolution},
+	author={Smith, Tom},
+	journal={International journal of market research},
+	volume={51},
+	number={4},
+	pages={559--561},
+	year={2009}
+}
+@article{Putzke2014519,
+	title = "Cross-cultural gender differences in the adoption and usage of social media platforms – An exploratory study of Last.FM ",
+	journal = "Computer Networks ",
+	volume = "75, Part B",
+	number = "",
+	pages = "519 - 530",
+	year = "2014",
+	note = "Special Issue on Online Social NetworksThe Connectedness, Pervasiveness and Ubiquity of Online Social Networks ",
+	issn = "1389-1286",
+	doi = "10.1016/j.comnet.2014.08.027",
+	url = "http://www.sciencedirect.com/science/article/pii/S1389128614003302",
+	author = "Johannes Putzke and Kai Fischbach and Detlef Schoder and Peter A. Gloor",
+	keywords = "Adoption",
+	keywords = "Cross-cultural differences",
+	keywords = "Gender",
+	keywords = "Social media ",
+	abstract = "Abstract This paper examines cross-cultural gender differences in the adoption and usage of the social media platform Last.FM. From a large-scale empirical study of 3748 Last.FM users from Australia, Finland, Germany, and the United States of America, we find: (1) men listen to more pieces of music on social media platforms than do women; (2) women focus their listening on fewer musical genres and fewer tracks than do men; (3) women register on Last.FM later than do “early adopting” men (absolutely and in comparison to their friends), but at a younger age; (4) women maintain more virtual friendships on Last.FM than do men; and (5) women, when choosing music to listen to on social media platforms, are more likely than are men to choose tracks that correspond to mainstream tastes. "
+}
+
+@CONFERENCE{Park200811,
+	author={Park, Y.-J. and Tuzhilin, A.},
+	title={The long tail of recommender systems and how to leverage it},
+	booktitle={RecSys'08: Proceedings of the 2008 ACM Conference on Recommender Systems},
+	year={2008},
+	pages={11-18},
+	doi={10.1145/1454008.1454012},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-63449136183&partnerID=40&md5=648e50cac2d99764f891b5bc4b97bbfe},
+	document_type={Conference Paper},
+	source={Scopus},
+}
+@CONFERENCE{Dai20141760,
+	author={Dai, C. and Qian, F. and Jiang, W. and Wang, Z. and Wu, Z.},
+	title={A personalized recommendation system for netease dating site},
+	journal={Proceedings of the VLDB Endowment},
+	year={2014},
+	volume={7},
+	number={13},
+	pages={1760-1765},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84905828317&partnerID=40&md5=90fbd8b20ad39757895bdee3ca58f459},
+	document_type={Article},
+	source={Scopus},
+}
+
+@CONFERENCE{Yin2012896,
+	author={Yin, H. and Cui, B. and Li, J. and Yao, J. and Chen, C.},
+	title={Challenging the long tail recommendation},
+	journal={Proceedings of the VLDB Endowment},
+	year={2012},
+	volume={5},
+	number={9},
+	pages={896-907},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84863735354&partnerID=40&md5=2bd887772ba832fbbb4631afd25514d9},
+	document_type={Article},
+	source={Scopus},
+}
+
+@online{1_hypebot.com_2015,
+	author={{Hypebot.com}},
+	title={Streaming Music Discovery: It's More Than Just Showing Album Credits - hypebot},
+	url={http://www.hypebot.com/hypebot/2015/07/streaming-music-discovery-its-more-than-just-showing-album-credits.html},
+	urldate={2015-08-26},
+	year={2015}
+}
+
+@online{ringen_2015,
+	author={Ringen, Jonathan},
+	title={Spotify, Apple Music, And The Streaming Wars: 5 Things We've Learned},
+	url={http://www.fastcompany.com/3048653/innovation-agents/listen-up},
+	urldate={2015-08-26},
+	journal={Fast Company},
+	year={2015}
+}
+
+@online{recsys2012,
+	author = {},
+	title = {Recommender Systems},
+	year = {2012}, 
+	url = {http://recommender-systems.org/},
+	note = {[Accessed: 26th August 2015]}
+}
+
 @article{Hejazi15,
 	author={Hejazi,S. A. and Stapleton,S. P.},
 	year={2015},
@@ -8,7 +286,7 @@
 	pages={89-99},
 	url={www.scopus.com},
 }
-}
+
 
 @book{larranaga2002estimation,
 	title={Estimation of distribution algorithms: A new tool for evolutionary computation},
@@ -34,7 +312,7 @@
 	editor={Ricci, Francesco and Rokach, Lior and Shapira, Bracha and Kantor, Paul B.},
 	doi={10.1007/978-0-387-85820-3_3},
 	title={Content-based Recommender Systems: State of the Art and Trends},
-	url={http://dx.doi.org/10.1007/978-0-387-85820-3\_3},
+	url={http://dx.doi.org/10.1007/978-0-387-85820-3_3},
 	publisher={Springer US},
 	author={Lops, Pasquale and de Gemmis, Marco and Semeraro, Giovanni},
 	pages={73-105},
@@ -50,8 +328,8 @@
 	number={4},
 	pages={331-370},
 	doi={10.1023/A:1021240730564},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036959356\&partnerID=40\&md5=28885a102109be826507abc2435117a7},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036959356&partnerID=40&md5=28885a102109be826507abc2435117a7},
 	document_type={Article},
 	source={Scopus},
 }
@@ -66,8 +344,8 @@
 	pages={435-447},
 	doi={10.1109/TASL.2007.911503},
 	art_number={4432655},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-39649112098\&partnerID=40\&md5=6827f82844ae1da58a6fa95caf5092d9},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-39649112098&partnerID=40&md5=6827f82844ae1da58a6fa95caf5092d9},
 	document_type={Article},
 	source={Scopus},
 }
@@ -95,8 +373,8 @@
 	volume={2013},
 	doi={10.1155/2013/704504},
 	art_number={704504},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84888882639\&partnerID=40\&md5=827fabc750db24f662fdae1c798f2507},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84888882639&partnerID=40&md5=827fabc750db24f662fdae1c798f2507},
 	document_type={Review},
 	source={Scopus},
 }
@@ -108,8 +386,8 @@
 	journal={Advances in Neural Information Processing Systems 22 - Proceedings of the 2009 Conference},
 	year={2009},
 	pages={1096-1104},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84863380535\&partnerID=40\&md5=e872a6227c816850167f91bb2d41d8b7},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84863380535&partnerID=40&md5=e872a6227c816850167f91bb2d41d8b7},
 	document_type={Conference Paper},
 	source={Scopus},
 }
@@ -189,8 +467,8 @@
 might like.
 </p>
 },
-	url = {http://mtg.upf.edu/static/media/PhD\_ocelma.pdf},
-	author = {Celma, \{`O}.},
+	url = {http://mtg.upf.edu/static/media/PhD_ocelma.pdf},
+	author = {Celma, \`{O}.}
 }
 
 @inproceedings{pachet2001musical,
@@ -211,8 +489,8 @@
 	number={5},
 	pages={293-302},
 	doi={10.1109/TSA.2002.800560},
-	note={cited By 976},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036648502\&partnerID=40\&md5=72d2fee186b42c9998f13415cbb79eea},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036648502&partnerID=40&md5=72d2fee186b42c9998f13415cbb79eea},
 	document_type={Article},
 	source={Scopus},
 }
@@ -224,8 +502,8 @@
 	year={2012},
 	pages={7-12},
 	doi={10.1145/2390848.2390851},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84870497334\&partnerID=40\&md5=40a48c1c9d787308dd315694b54b64ec},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84870497334&partnerID=40&md5=40a48c1c9d787308dd315694b54b64ec},
 	document_type={Conference Paper},
 	source={Scopus},
 }
@@ -246,8 +524,8 @@
 	pages={6959-6963},
 	doi={10.1109/ICASSP.2014.6854949},
 	art_number={6854949},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84905259152\&partnerID=40\&md5=3441dfa8c7998a8eb39f668d43efb8a1},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84905259152&partnerID=40&md5=3441dfa8c7998a8eb39f668d43efb8a1},
 	document_type={Conference Paper},
 	source={Scopus},
 }
@@ -301,8 +579,8 @@
 	number={2},
 	pages={781-788},
 	doi={10.12733/jcis9623},
-	note={cited By 0},
-	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84892865461\&partnerID=40\&md5=a2927d36b493e8ef4d1cdab3055fa68b},
+	note={},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84892865461&partnerID=40&md5=a2927d36b493e8ef4d1cdab3055fa68b},
 	document_type={Article},
 	source={Scopus},
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/slides/chiliguano_msc_project_slides.tex	Sun Aug 30 15:49:27 2015 +0100
@@ -0,0 +1,95 @@
+\documentclass{beamer}
+%
+% Choose how your presentation looks.
+%
+% For more themes, color themes and font themes, see:
+% http://deic.uab.es/~iblanes/beamer_gallery/index_by_theme.html
+%
+\mode<presentation>
+{
+  \usetheme{Frankfurt}      % or try Darmstadt, Madrid, Warsaw, ...
+  \usecolortheme{rose} % or try albatross, beaver, crane, ...
+  \usecolortheme{seahorse}
+  \usefonttheme[onlymath]{serif}  % or try serif, structurebold, ...
+  \setbeamertemplate{navigation symbols}{}
+  \setbeamertemplate{caption}[numbered]
+} 
+
+\usepackage[english]{babel}
+\usepackage[utf8x]{inputenc}
+
+\title[Hybrid Music Recommender]{Hybrid music recommender using content-based and social information}
+\author{Paulo Esteban Chiliguano Torres}
+\institute{School of Electronic Engineering and Computer Science\\Queen Mary University of London}
+\date{September 3rd, 2015}
+
+\begin{document}
+
+\begin{frame}
+  \titlepage
+\end{frame}
+
+% Uncomment these lines for an automatically generated outline.
+%\begin{frame}{Outline}
+%  \tableofcontents
+%\end{frame}
+
+\section{Introduction}
+
+\begin{frame}{Introduction}
+
+\begin{itemize}
+  \item Your introduction goes here!
+  \item Use \texttt{itemize} to organize your main points.
+\end{itemize}
+
+\vskip 1cm
+
+\begin{block}{Examples}
+Some examples of commonly used commands and features are included, to help you get started.
+\end{block}
+
+\end{frame}
+
+\section{Some \LaTeX{} Examples}
+
+\subsection{Tables and Figures}
+
+\begin{frame}{Tables and Figures}
+
+\begin{itemize}
+\item Use \texttt{tabular} for basic tables --- see Table~\ref{tab:widgets}, for example.
+\item You can upload a figure (JPEG, PNG or PDF) using the files menu. 
+\item To include it in your document, use the \texttt{includegraphics} command (see the comment below in the source code).
+\end{itemize}
+
+% Commands to include a figure:
+%\begin{figure}
+%\includegraphics[width=\textwidth]{your-figure's-file-name}
+%\caption{\label{fig:your-figure}Caption goes here.}
+%\end{figure}
+
+\begin{table}
+\centering
+\begin{tabular}{l|r}
+Item & Quantity \\\hline
+Widgets & 42 \\
+Gadgets & 13
+\end{tabular}
+\caption{\label{tab:widgets}An example table.}
+\end{table}
+
+\end{frame}
+
+\subsection{Mathematics}
+
+\begin{frame}{Readable Mathematics}
+
+Let $X_1, X_2, \ldots, X_n$ be a sequence of independent and identically distributed random variables with $\text{E}[X_i] = \mu$ and $\text{Var}[X_i] = \sigma^2 < \infty$, and let
+$$S_n = \frac{X_1 + X_2 + \cdots + X_n}{n}
+      = \frac{1}{n}\sum_{i}^{n} X_i$$
+denote their mean. Then as $n$ approaches infinity, the random variables $\sqrt{n}(S_n - \mu)$ converge in distribution to a normal $\mathcal{N}(0, \sigma^2)$.
+
+\end{frame}
+
+\end{document}