changeset 20:1dbd24575d44

Report update
author Paulo Chiliguano <p.e.chiilguano@se14.qmul.ac.uk>
date Tue, 04 Aug 2015 12:13:47 +0100
parents f1504bb2c552
children e68dbee1f6db
files Code/convolutional_mlp.py Code/logistic_sgd.py Code/prepare_dataset.py Report/abstract/abstract.tex Report/acknowledgements/acknowledgements.tex Report/chapter1/introduction.tex Report/chapter2/background.tex Report/chapter3/ch3.tex Report/chapter4/evaluation.tex Report/chapter5/results.tex Report/chapter6/conclusions.tex Report/chiliguano_msc_finalproject.aux Report/chiliguano_msc_finalproject.bbl Report/chiliguano_msc_finalproject.blg Report/chiliguano_msc_finalproject.lof Report/chiliguano_msc_finalproject.log Report/chiliguano_msc_finalproject.pdf Report/chiliguano_msc_finalproject.synctex.gz Report/chiliguano_msc_finalproject.tex Report/chiliguano_msc_finalproject.toc Report/references.bib
diffstat 21 files changed, 474 insertions(+), 409 deletions(-) [+]
line wrap: on
line diff
--- a/Code/convolutional_mlp.py	Tue Jul 28 21:14:27 2015 +0100
+++ b/Code/convolutional_mlp.py	Tue Aug 04 12:13:47 2015 +0100
@@ -137,7 +137,7 @@
 
 def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                     dataset='mnist.pkl.gz',
-                    nkerns=[256, 256], batch_size=20):
+                    nkerns=[10, 10], batch_size=20):
     """ Demonstrates lenet on MNIST dataset
 
     :type learning_rate: float
@@ -188,7 +188,7 @@
     # to a 4D tensor, compatible with our LeNetConvPoolLayer
     # (28, 28) is the size of MNIST images.
     #layer0_input = x.reshape((batch_size, 1, 28, 28))
-    layer0_input = x.reshape((batch_size, 1, 1204, 513))
+    layer0_input = x.reshape((batch_size, 1, 128, 513))
     # Construct the first convolutional pooling layer:
     # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
     # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
@@ -197,7 +197,7 @@
         rng,
         input=layer0_input,
         #image_shape=(batch_size, 1, 28, 28),
-        image_shape=(batch_size, 1, 1204, 513),
+        image_shape=(batch_size, 1, 128, 513),
         #filter_shape=(nkerns[0], 1, 5, 5),
         filter_shape=(nkerns[0], 1, 4, 513),
         #poolsize=(2, 2)
@@ -212,7 +212,7 @@
         rng,
         input=layer0.output,
         #image_shape=(batch_size, nkerns[0], 12, 12),
-        image_shape=(batch_size, nkerns[0], 300, 1),
+        image_shape=(batch_size, nkerns[0], 31, 1),
         #filter_shape=(nkerns[1], nkerns[0], 5, 5),
         filter_shape=(nkerns[1], nkerns[0], 4, 1),
         #poolsize=(2, 2)
@@ -229,6 +229,17 @@
         poolsize=(1, 1)
     )
     '''
+    
+    # Construct the third convolutional pooling layer
+    '''
+    layer3 = LeNetConvPoolLayer(
+        rng,
+        input=layer2.output,
+        image_shape=(batch_size, nkerns[2], 296, 123),
+        filter_shape=(nkerns[3], nkerns[2], 5, 5),
+        poolsize=(1, 1)
+    )
+    '''
     # the HiddenLayer being fully-connected, it operates on 2D matrices of
     # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
     # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
@@ -240,7 +251,7 @@
         rng,
         input=layer2_input,
         #n_in=nkerns[1] * 4 * 4,
-        n_in=nkerns[1] * 148 * 1,
+        n_in=nkerns[1] * 14 * 1,
         #n_out=500,
         n_out=513,
         #activation=T.tanh
@@ -272,21 +283,21 @@
             y: valid_set_y[index * batch_size: (index + 1) * batch_size]
         }
     )
-    
+    '''
     # Paulo: Set best param for MLP pre-training
     f = file('/homes/pchilguano/deep_learning/best_params.pkl', 'rb')
     #params3 = cPickle.load(f)
     params0, params1, params2, params3 = cPickle.load(f)
     f.close()
-    #layer0.W.set_value(params0[0])
-    #layer0.b.set_value(params0[1])
+    layer0.W.set_value(params0[0])
+    layer0.b.set_value(params0[1])
     layer1.W.set_value(params1[0])
     layer1.b.set_value(params1[1])
     layer2.W.set_value(params2[0])
     layer2.b.set_value(params2[1])
     layer3.W.set_value(params3[0])
     layer3.b.set_value(params3[1])
-        
+    '''    
     # create a list of all model parameters to be fit by gradient descent
     params = layer3.params + layer2.params + layer1.params + layer0.params
 
--- a/Code/logistic_sgd.py	Tue Jul 28 21:14:27 2015 +0100
+++ b/Code/logistic_sgd.py	Tue Aug 04 12:13:47 2015 +0100
@@ -208,7 +208,7 @@
     f = gzip.open(dataset, 'rb')
     train_set, valid_set, test_set = cPickle.load(f)
     f.close()'''
-    f = file('/homes/pchilguano/deep_learning/gtzan.pkl', 'rb')
+    f = file('/homes/pchilguano/deep_learning/gtzan_3sec.pkl', 'rb')
     train_set, valid_set, test_set = cPickle.load(f)
     f.close()
     #train_set, valid_set, test_set format: tuple(input, target)
--- a/Code/prepare_dataset.py	Tue Jul 28 21:14:27 2015 +0100
+++ b/Code/prepare_dataset.py	Tue Aug 04 12:13:47 2015 +0100
@@ -11,7 +11,7 @@
 import cPickle
 import sklearn.preprocessing as preprocessing
 
-filename = '/homes/pchilguano/deep_learning/features/feats.h5'
+filename = '/homes/pchilguano/deep_learning/features/feats_3sec.h5'
 with tables.openFile(filename, 'r') as f:
     features = f.root.x.read()
     #filenames = f.root.filenames.read()
@@ -23,7 +23,7 @@
 flat_data.shape = (-1, number_of_features)
 scaler = preprocessing.StandardScaler().fit(flat_data)
 flat_data = scaler.transform(flat_data)
-#flat_data.shape = (features.shape[0], -1)
+flat_data.shape = (features.shape[0], -1)
 #flat_targets = filenames.repeat(n_per_example)
 
 #genre = np.asarray([line.strip().split('\t')[1] for line in open(filename,'r').readlines()])
@@ -46,7 +46,7 @@
 train_input, valid_input, test_input = np.array_split(flat_data, [flat_data.shape[0]*4/5, flat_data.shape[0]*9/10])
 train_target, valid_target, test_target = np.array_split(target, [target.shape[0]*4/5, target.shape[0]*9/10])
 
-f = file('/homes/pchilguano/deep_learning/gtzan.pkl', 'wb')
+f = file('/homes/pchilguano/deep_learning/gtzan_3sec.pkl', 'wb')
 cPickle.dump(((train_input, train_target), (valid_input, valid_target), (test_input, test_target)), f, protocol=cPickle.HIGHEST_PROTOCOL)
 f.close()
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/abstract/abstract.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,5 @@
+\begin{abstract}
+
+.
+
+\end{abstract}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/acknowledgements/acknowledgements.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,5 @@
+\begin{abstract}
+
+This is an abstract.
+
+\end{abstract}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter1/introduction.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,25 @@
+\chapter{Introduction}
+
+Recommender systems can be described as facilities that guide users to interesting objects in a huge space of information. In order to achieve high performance, there is the need of hybridization of two or more recommendation techniques.
+
+This project is going to examine a different approach to develop a hybrid music recommender system in order to suggest new items that would be appealing and enjoyable to the users. This system will combine two recommendation techniques. The first technique is collaborative filtering to predict music preferences on the basis of users' information from an online social network (OSN) such as Last.fm\footnote{http://last.fm/}, and the second technique is content-based filtering in which acoustical features from audio tracks are correlated to compute their similarities.
+
+Users' information will be obtained from the complementary Taste Profile subset, which is a part of the Million Song Dataset\footnote{http://labrosa.ee.columbia.edu/millionsong/}. The music library will be consolidated by crawling songs' information via 7digital API\footnote{http://developer.7digital.com}.
+
+A convolutional neural network (CNN), which is a deep learning model, will be employed for describing the audio files of the music library. Estimation of distribution algorithms (EDA), which are optimization methods in statistics and machine learning, will be investigated to model user profiles that will be comparable with the features of the audio files to predict ratings and produce new item recommendations.
+
+The evaluation of the hybrid recommender system will be assessed by comparing the results with a purely content-based system.
+
+\section{Outline of the thesis}
+
+The rest of the report is organised as follows:
+
+\textbf{Chapter 2} reviews related work with deep learning techniques and Estimation of Distribution Algorithms on recommendation systems.
+
+\textbf{Chapter 3} explains the proposed approach of the hybrid system for recommending new music items.
+
+\textbf{Chapter 4} addresses the experiments and the evaluation scenarios of the performance for the hybrid recommender system.
+
+\textbf{Chapter 5} discusses and analyses the results from the conducted experiments to evaluate the performance of the proposed hybrid music recommender system approach.
+
+\textbf{Chapter 6} presents the conclusions and some thoughts for further research.
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter2/background.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,33 @@
+\chapter{Background}
+
+\section{Recommender Systems}
+
+Recommender systems are software or technical facilities to provide items suggestions or predict customer preferences. These systems play an important role in commercial applications to increase items sales and user satisfaction. Depending on the application, recommender systems can be categorised in the following groups: collaborative filtering, content-based methods and hybrid methods. 
+
+\subsection{Collaborative filtering (CF)}
+In collaborative filtering (CF), recommendations are based on correlation between users' ratings or they can be predicted from historical user data. The strength of CF is that the recommendation process is independent from the item features. On the other hand, CF would not be suitable when the user-item matrix is sparse. \citep{Burke2002331}
+
+\subsection{Content-based methods}
+Content-based methods build user profiles by analysing the users' rated items. Each profile is then processed to be correlated with another item, which has not been rated, to compute the interest of the user on this object. \citep{Lops2011}
+
+\subsection{Hybrid methods}
+Hybrid recommendation is based on the combination of techniques mentioned above, by using the advantages of one system to compensate the disadvantages of the other system. 
+
+In this project, CF, which provides song ratings, is integrated with a content-based method, which compares spectral features of songs, to achieve hybridisation. 
+%is based on a three-way aspect model \citep{Yoshii2008435}. Real item ratings are obtained through Last.fm API and spectral information are represented by convolutional deep belief networks (CDBN) features computed from items' spectrogram \citep{Lee20091096}.
+
+\section{Online Social Networks}
+Social network sites (SNSs) are “web-based services that allow individuals to (1) construct a public or semi-public profile within a bounded system, (2) articulate a list of other users with whom they share a connection, and (3) view and traverse their list of connections and those made by others within the system”. \citep{JCC4:JCC4393}
+
+%\subsection{APIs}
+%The publicly available music related information can be collected from user profiles on social networks using Application Program Interface (API).
+
+%\section{Data Fusion Techniques}
+%Combination of multiple sources of information to obtain more relevant parameters is known as data fusion.
+%In this study, a cooperative data fusion technique is considered to augment information provided from social network source to content-based system features. \citep{Castanedo2013}
+
+\section{Deep Learning}
+
+\subsection{Convolutional Neural Networks (CNN)}
+
+\section{Estimation of Distribution Algorithms (EDAs)}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter3/ch3.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,15 @@
+\chapter{Methodology}
+\section{Data collection}
+\subsection{Taste profile subset filtering}
+%At this stage, similarities between users is calculated to form a neighbourhood and predict user rating based on combination of the ratings of selected users in the neighbourhood.
+\subsection{Audio samples collection}
+%Classifier creates a model for each user based on the acoustic features of the tracks that user has liked.
+\subsection{Log-mel spectrograms}
+
+\section{Algorithms}
+\subsection{CNN implementation}
+%Deep belief network is a probabilistic model that has one observed layer and several hidden layers.
+\subsubsection{Genre classification}
+
+\subsection{Continuous Bayesian EDA}
+\subsection{EDA-based hybrid recommender}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter4/evaluation.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,25 @@
+\chapter{Experiments}
+\section{Evaluation for recommender systems}
+
+\subsection{Types of experiments}
+The scenarios for experiments require defining a hypothesis, controlling variables and generalising the results. Three types of experiments \citep{export:115396} can be used to compare and evaluate recommender algorithms:
+\begin{itemize}
+\item \textbf{Offline experiments:} where recorded historic data of users' ratings are used to simulate online users behaviour. The aim of this type of experiment is to refine approaches before testing with real users. On the other hand, results may have biases due to distribution of users.
+\item \textbf{User studies:} where test subjects interact with the recommendation system and their behaviour is recorded, giving a large set of quantitative measurements. One disadvantage of this type of experiment is recruiting subjects that represent the population of the users of the real recommendation system.
+\item \textbf{Online evaluation:} where the designer of the recommender application expects to influence the users' behaviour. Usually, this type of evaluation is run after extensive offline studies.
+\end{itemize}
+Also, evaluation for recommender systems can be classified \citep{1242} in:
+\begin{itemize}
+\item \textbf{System-centric evaluation:} The accuracy is based only on users' dataset.
+\item \textbf{Network-centric evaluation:} Other components of the recommendation system such as diversity of recommendations are measured as a complement of the metrics of system-centric evaluation.
+\item \textbf{User-centric evaluation:} The perceived quality and usefulness of recommendations for the users are measured via provided feedback.
+\end{itemize}
+\section{Evaluation settings}
+The hybrid recommender system of this project is evaluated with an offline experiment and system-centric metrics.
+\subsection{Dataset}
+For the purpose of evaluation of the hybrid recommender system, a part from the Taste Profile subset is used because the data format includes user-item ratings and it is publicly available. A 10-fold cross validation is performed which splits the data set in 90\% for training and 10\% for testing.
+
+\subsection{Evaluation measures}
+Because the data set does not include explicit ratings — the number of plays of each track is used as a proxy for users' behaviour — decision-based metrics are considered.
+\subsection{Experimentation aims}
+In order to evaluate the performance of the hybrid recommender, the prediction ratings are compared with a model-based collaborative filtering.
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter5/results.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,1 @@
+\chapter{Results}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/chapter6/conclusions.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,3 @@
+\chapter{Conclusion}
+
+\section{Future work}
\ No newline at end of file
--- a/Report/chiliguano_msc_finalproject.aux	Tue Jul 28 21:14:27 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-\relax 
-\@input{ch1/ch1.aux}
-\@input{ch2/ch2.aux}
-\@input{ch3/ch3.aux}
-\@input{ch4/ch4.aux}
-\@input{ch5/ch5.aux}
-\@input{ch6/ch6.aux}
-\bibstyle{plain}
-\bibdata{chiliguano_msc_finalproject}
-\bibcite{JCC4:JCC4393}{1}
-\bibcite{Burke02}{2}
-\bibcite{Castanedo13}{3}
-\bibcite{1242}{4}
-\bibcite{Lee09}{5}
-\bibcite{Lops11}{6}
-\bibcite{export:115396}{7}
-\bibcite{Yoshii08}{8}
-\@writefile{toc}{\contentsline {chapter}{Bibliography}{16}}
--- a/Report/chiliguano_msc_finalproject.bbl	Tue Jul 28 21:14:27 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-\begin{thebibliography}{1}
-
-\bibitem{JCC4:JCC4393}
-danah~m. boyd and Nicole~B. Ellison.
-\newblock Social network sites: Definition, history, and scholarship.
-\newblock {\em Journal of Computer-Mediated Communication}, 13(1):210--230,
-  2007.
-
-\bibitem{Burke02}
-R.~Burke.
-\newblock Hybrid recommender systems: Survey and experiments.
-\newblock {\em User Modelling and User-Adapted Interaction}, 12(4):331--370,
-  2002.
-
-\bibitem{Castanedo13}
-F.~Castanedo.
-\newblock A review of data fusion techniques.
-\newblock {\em The Scientific World Journal}, 2013, 2013.
-
-\bibitem{1242}
-{\`O}.~Celma.
-\newblock {\em Music Recommendation and Discovery in the Long Tail}.
-\newblock PhD thesis, Universitat Pompeu Fabra, Barcelona, 2008.
-
-\bibitem{Lee09}
-H.~Lee, L.~Yan, P.~Pham, and A.~Y. Ng.
-\newblock Unsupervised feature learning for audio classification using
-  convolutional deep belief networks.
-\newblock In {\em Advances in Neural Information Processing Systems 22 -
-  Proceedings of the 2009 Conference}, pages 1096--1104, 2009.
-
-\bibitem{Lops11}
-Pasquale Lops, Marco de~Gemmis, and Giovanni Semeraro.
-\newblock {\em Content-based Recommender Systems: State of the Art and Trends},
-  pages 73--105.
-\newblock Springer US, Boston, MA, 2011.
-
-\bibitem{export:115396}
-Guy Shani and Asela Gunawardana.
-\newblock Evaluating recommender systems.
-\newblock Technical Report MSR-TR-2009-159, November 2009.
-
-\bibitem{Yoshii08}
-K.~Yoshii, M.~Goto, K.~Komatani, T.~Ogata, and H.~G. Okuno.
-\newblock An efficient hybrid music recommender system using an incrementally
-  trainable probabilistic generative model.
-\newblock {\em IEEE Transactions on Audio, Speech and Language Processing},
-  16(2):435--447, 2008.
-
-\end{thebibliography}
--- a/Report/chiliguano_msc_finalproject.blg	Tue Jul 28 21:14:27 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.blg	Tue Aug 04 12:13:47 2015 +0100
@@ -1,11 +1,55 @@
-This is BibTeX, Version 0.99dThe top-level auxiliary file: chiliguano_msc_finalproject.aux
-A level-1 auxiliary file: ch1/ch1.aux
-A level-1 auxiliary file: ch2/ch2.aux
-A level-1 auxiliary file: ch3/ch3.aux
-A level-1 auxiliary file: ch4/ch4.aux
-A level-1 auxiliary file: ch5/ch5.aux
-A level-1 auxiliary file: ch6/ch6.aux
-The style file: plain.bst
-Database file #1: chiliguano_msc_finalproject.bib
-Warning--empty institution in export:115396
-(There was 1 warning)
+This is BibTeX, Version 0.99d (TeX Live 2015)
+Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
+The top-level auxiliary file: chiliguano_msc_finalproject.aux
+A level-1 auxiliary file: abstract/abstract.aux
+A level-1 auxiliary file: chapter1/introduction.aux
+A level-1 auxiliary file: chapter2/background.aux
+A level-1 auxiliary file: chapter3/ch3.aux
+A level-1 auxiliary file: chapter4/evaluation.aux
+A level-1 auxiliary file: chapter5/results.aux
+A level-1 auxiliary file: chapter6/conclusions.aux
+The style file: agsm.bst
+Database file #1: references.bib
+Warning--empty institution in export:115396
+You've used 5 entries,
+            2909 wiz_defined-function locations,
+            689 strings with 6072 characters,
+and the built_in function-call counts, 15635 in all, are:
+= -- 2478
+> -- 225
+< -- 1
++ -- 1137
+- -- 67
+* -- 1316
+:= -- 3247
+add.period$ -- 6
+call.type$ -- 5
+change.case$ -- 69
+chr.to.int$ -- 5
+cite$ -- 6
+duplicate$ -- 79
+empty$ -- 162
+format.name$ -- 86
+if$ -- 2796
+int.to.chr$ -- 5
+int.to.str$ -- 0
+missing$ -- 3
+newline$ -- 24
+num.names$ -- 53
+pop$ -- 20
+preamble$ -- 1
+purify$ -- 75
+quote$ -- 0
+skip$ -- 64
+stack$ -- 0
+substring$ -- 3413
+swap$ -- 14
+text.length$ -- 71
+text.prefix$ -- 0
+top$ -- 0
+type$ -- 20
+warning$ -- 1
+while$ -- 111
+width$ -- 0
+write$ -- 75
+(There was 1 warning)
--- a/Report/chiliguano_msc_finalproject.lof	Tue Jul 28 21:14:27 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.lof	Tue Aug 04 12:13:47 2015 +0100
@@ -1,6 +1,6 @@
-\addvspace {10\p@ }
-\addvspace {10\p@ }
-\addvspace {10\p@ }
-\addvspace {10\p@ }
-\addvspace {10\p@ }
-\addvspace {10\p@ }
+\addvspace {10\p@ }
+\addvspace {10\p@ }
+\addvspace {10\p@ }
+\addvspace {10\p@ }
+\addvspace {10\p@ }
+\addvspace {10\p@ }
--- a/Report/chiliguano_msc_finalproject.log	Tue Jul 28 21:14:27 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,262 +0,0 @@
-This is pdfTeX, Version 3.14159265-2.6-1.40.15 (MiKTeX 2.9 64-bit) (preloaded format=pdflatex 2015.1.29)  29 JUN 2015 20:26
-entering extended mode
-**chiliguano_msc_finalproject.tex
-(chiliguano_msc_finalproject.tex
-LaTeX2e <2014/05/01>
-Babel <3.9l> and hyphenation patterns for 68 languages loaded.
-(qmwphd.cls
-Document Class: qmwphd 1997/07/22 QMW DCS PhD Thesis Class
-("C:\Program Files\MiKTeX 2.9\tex\latex\amsfonts\amsfonts.sty"
-Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support
-\@emptytoks=\toks14
-\symAMSa=\mathgroup4
-\symAMSb=\mathgroup5
-LaTeX Font Info:    Overwriting math alphabet `\mathfrak' in version `bold'
-(Font)                  U/euf/m/n --> U/euf/b/n on input line 106.
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\amsfonts\amssymb.sty"
-Package: amssymb 2013/01/14 v3.01 AMS font symbols
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\stmaryrd\stmaryrd.sty"
-Package: stmaryrd 1994/03/03 St Mary's Road symbol package
-\symstmry=\mathgroup6
-LaTeX Font Info:    Overwriting symbol font `stmry' in version `bold'
-(Font)                  U/stmry/m/n --> U/stmry/b/n on input line 89.
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\times.sty"
-Package: times 2005/04/12 PSNFSS-v9.2a (SPQR) 
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\mathptmx.sty"
-Package: mathptmx 2005/04/12 PSNFSS-v9.2a Times w/ Math, improved (SPQR, WaS) 
-LaTeX Font Info:    Redeclaring symbol font `operators' on input line 28.
-LaTeX Font Info:    Overwriting symbol font `operators' in version `normal'
-(Font)                  OT1/cmr/m/n --> OT1/ztmcm/m/n on input line 28.
-LaTeX Font Info:    Overwriting symbol font `operators' in version `bold'
-(Font)                  OT1/cmr/bx/n --> OT1/ztmcm/m/n on input line 28.
-LaTeX Font Info:    Redeclaring symbol font `letters' on input line 29.
-LaTeX Font Info:    Overwriting symbol font `letters' in version `normal'
-(Font)                  OML/cmm/m/it --> OML/ztmcm/m/it on input line 29.
-LaTeX Font Info:    Overwriting symbol font `letters' in version `bold'
-(Font)                  OML/cmm/b/it --> OML/ztmcm/m/it on input line 29.
-LaTeX Font Info:    Redeclaring symbol font `symbols' on input line 30.
-LaTeX Font Info:    Overwriting symbol font `symbols' in version `normal'
-(Font)                  OMS/cmsy/m/n --> OMS/ztmcm/m/n on input line 30.
-LaTeX Font Info:    Overwriting symbol font `symbols' in version `bold'
-(Font)                  OMS/cmsy/b/n --> OMS/ztmcm/m/n on input line 30.
-LaTeX Font Info:    Redeclaring symbol font `largesymbols' on input line 31.
-LaTeX Font Info:    Overwriting symbol font `largesymbols' in version `normal'
-(Font)                  OMX/cmex/m/n --> OMX/ztmcm/m/n on input line 31.
-LaTeX Font Info:    Overwriting symbol font `largesymbols' in version `bold'
-(Font)                  OMX/cmex/m/n --> OMX/ztmcm/m/n on input line 31.
-\symbold=\mathgroup7
-\symitalic=\mathgroup8
-LaTeX Font Info:    Redeclaring math alphabet \mathbf on input line 34.
-LaTeX Font Info:    Overwriting math alphabet `\mathbf' in version `normal'
-(Font)                  OT1/cmr/bx/n --> OT1/ptm/bx/n on input line 34.
-LaTeX Font Info:    Overwriting math alphabet `\mathbf' in version `bold'
-(Font)                  OT1/cmr/bx/n --> OT1/ptm/bx/n on input line 34.
-LaTeX Font Info:    Redeclaring math alphabet \mathit on input line 35.
-LaTeX Font Info:    Overwriting math alphabet `\mathit' in version `normal'
-(Font)                  OT1/cmr/m/it --> OT1/ptm/m/it on input line 35.
-LaTeX Font Info:    Overwriting math alphabet `\mathit' in version `bold'
-(Font)                  OT1/cmr/bx/it --> OT1/ptm/m/it on input line 35.
-LaTeX Info: Redefining \hbar on input line 50.
-)
-LaTeX Font Info:    Redeclaring symbol font `symbols' on input line 24.
-LaTeX Font Info:    Overwriting symbol font `symbols' in version `normal'
-(Font)                  OMS/ztmcm/m/n --> OMS/cmsy/m/n on input line 24.
-LaTeX Font Info:    Overwriting symbol font `symbols' in version `bold'
-(Font)                  OMS/ztmcm/m/n --> OMS/cmsy/m/n on input line 24.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\base\report.cls"
-Document Class: report 2014/09/29 v1.4h Standard LaTeX document class
-("C:\Program Files\MiKTeX 2.9\tex\latex\base\size12.clo"
-File: size12.clo 2014/09/29 v1.4h Standard LaTeX file (size option)
-)
-\c@part=\count79
-\c@chapter=\count80
-\c@section=\count81
-\c@subsection=\count82
-\c@subsubsection=\count83
-\c@paragraph=\count84
-\c@subparagraph=\count85
-\c@figure=\count86
-\c@table=\count87
-\abovecaptionskip=\skip41
-\belowcaptionskip=\skip42
-\bibindent=\dimen102
-)
-\hour=\count88
-\minute=\count89
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\moreverb\moreverb.sty"
-Package: moreverb 2008/06/03 v2.3a `more' verbatim facilities
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\tools\verbatim.sty"
-Package: verbatim 2014/10/28 v1.5q LaTeX2e package for verbatim enhancements
-\every@verbatim=\toks15
-\verbatim@line=\toks16
-\verbatim@in@stream=\read1
-)
-\tab@position=\count90
-\tab@size=\count91
-\listing@line=\count92
-)
-(chiliguano_msc_finalproject.aux (ch1/ch1.aux) (ch2/ch2.aux) (ch3/ch3.aux)
-(ch4/ch4.aux) (ch5/ch5.aux) (ch6/ch6.aux))
-LaTeX Font Info:    Checking defaults for OML/cmm/m/it on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Checking defaults for T1/cmr/m/n on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Checking defaults for OT1/cmr/m/n on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Checking defaults for OMS/cmsy/m/n on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Checking defaults for OMX/cmex/m/n on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Checking defaults for U/cmr/m/n on input line 4.
-LaTeX Font Info:    ... okay on input line 4.
-LaTeX Font Info:    Try loading font information for OT1+ptm on input line 4.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\ot1ptm.fd"
-File: ot1ptm.fd 2001/06/04 font definitions for OT1/ptm.
-)
-\big@size=\dimen103
-LaTeX Font Info:    Font shape `OT1/ptm/bx/n' in size <20.74> not available
-(Font)              Font shape `OT1/ptm/b/n' tried instead on input line 12.
-LaTeX Font Info:    Font shape `OT1/ptm/bx/n' in size <14.4> not available
-(Font)              Font shape `OT1/ptm/b/n' tried instead on input line 12.
-LaTeX Font Info:    Font shape `OT1/ptm/bx/n' in size <12> not available
-(Font)              Font shape `OT1/ptm/b/n' tried instead on input line 12.
- [1{C:/ProgramData/MiKTeX/2.9/pdftex/config/pdftex.map}
-
-
-
-] [2
-
-
-
-] (chiliguano_msc_finalproject.toc
-LaTeX Font Info:    Try loading font information for OT1+ztmcm on input line 2.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\ot1ztmcm.fd"
-File: ot1ztmcm.fd 2000/01/03 Fontinst v1.801 font definitions for OT1/ztmcm.
-)
-LaTeX Font Info:    Try loading font information for OML+ztmcm on input line 2.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\omlztmcm.fd"
-File: omlztmcm.fd 2000/01/03 Fontinst v1.801 font definitions for OML/ztmcm.
-)
-LaTeX Font Info:    Try loading font information for OMX+ztmcm on input line 2.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\omxztmcm.fd"
-File: omxztmcm.fd 2000/01/03 Fontinst v1.801 font definitions for OMX/ztmcm.
-)
-LaTeX Font Info:    Try loading font information for U+stmry on input line 2.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\stmaryrd\ustmry.fd")
-LaTeX Font Info:    Font shape `OT1/ptm/bx/n' in size <9> not available
-(Font)              Font shape `OT1/ptm/b/n' tried instead on input line 2.
-LaTeX Font Info:    Font shape `OT1/ptm/bx/n' in size <7> not available
-(Font)              Font shape `OT1/ptm/b/n' tried instead on input line 2.
- [3
-
-
-
-])
-\tf@toc=\write3
- [4]
-(chiliguano_msc_finalproject.lof)
-\tf@lof=\write4
- [5
-
-
-] [6
-
-
-] (ch1/ch1.tex
-Chapter 1.
-[7
-
-
-
-
-
-]) [8] (ch2/ch2.tex
-Chapter 2.
-[9
-
-
-
-
-]
-Missing character: There is no  in font ptmr7t!
-Missing character: There is no  in font ptmr7t!
-Missing character: There is no  in font ptmr7t!
-Missing character: There is no  in font ptmr7t!
-Missing character: There is no  in font ptmr7t!
-Missing character: There is no  in font ptmr7t!
-) [10] (ch3/ch3.tex
-Chapter 3.
-) [11
-
-
-
-
-] (ch4/ch4.tex
-Chapter 4.
-LaTeX Font Info:    Try loading font information for OMS+ptm on input line 7.
-("C:\Program Files\MiKTeX 2.9\tex\latex\psnfss\omsptm.fd"
-File: omsptm.fd 
-)
-LaTeX Font Info:    Font shape `OMS/ptm/m/n' in size <12> not available
-(Font)              Font shape `OMS/cmsy/m/n' tried instead on input line 7.
- [12
-
-
-
-
-]) [13]
-(ch5/ch5.tex
-Chapter 5.
-) [14
-
-
-
-
-] (ch6/ch6.tex
-Chapter 6.
-) [15
-
-
-
-
-] (chiliguano_msc_finalproject.bbl) [16
-
-
-
-] (chiliguano_msc_finalproject.aux
-(ch1/ch1.aux) (ch2/ch2.aux) (ch3/ch3.aux) (ch4/ch4.aux) (ch5/ch5.aux)
-(ch6/ch6.aux)) ) 
-Here is how much of TeX's memory you used:
- 1003 strings out of 493698
- 12288 string characters out of 3144412
- 59638 words of memory out of 3000000
- 4294 multiletter control sequences out of 15000+200000
- 21442 words of font info for 51 fonts, out of 3000000 for 9000
- 1025 hyphenation exceptions out of 8191
- 28i,5n,26p,582b,186s stack positions out of 5000i,500n,10000p,200000b,50000s
-{C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/fontname/8r.enc}<C
-:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/cm/cmsy10.pfb><C:/Progra
-m Files/MiKTeX 2.9/fonts/type1/urw/times/utmb8a.pfb><C:/Program Files/MiKTeX 2.
-9/fonts/type1/urw/times/utmr8a.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/urw
-/times/utmri8a.pfb>
-Output written on chiliguano_msc_finalproject.pdf (16 pages, 76350 bytes).
-PDF statistics:
- 71 PDF objects out of 1000 (max. 8388607)
- 0 named destinations out of 1000 (max. 500000)
- 1 words of extra memory for PDF output out of 10000 (max. 10000000)
-
Binary file Report/chiliguano_msc_finalproject.pdf has changed
Binary file Report/chiliguano_msc_finalproject.synctex.gz has changed
--- a/Report/chiliguano_msc_finalproject.tex	Tue Jul 28 21:14:27 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.tex	Tue Aug 04 12:13:47 2015 +0100
@@ -1,41 +1,100 @@
-\documentclass[12pt,draft,spaced,oneside,openright]{qmwphd}
+% \documentclass[12pt,doublespaced,oneside,openright]{qmwphd}
 % Final MSc project report using qmwphd.cls
-\usepackage{moreverb} % This defines \verbatiminput.
+% \usepackage{moreverb} % This defines \verbatiminput.
+
+\documentclass[a4paper,12pt,draft]{report}
+%PhD Thesis Template for the School of Electronic Engineering and Computer Science, Queen Mary University of London. Stripped from Dan Stowell's PhD.
+
+%BEFORE SUBMISSION DO THESE:
+% * deactivate all \includeonly
+% * ensure \doneit set to nothing
+% * ensure numbering CONTINUOUS from title page on through
+% * activate the includes of license, ack, etc
+% * check through for question mark errors in render
+% * make sure the bibliog doesn't have ugly urls in
+
+\usepackage{ifdraft}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage{amssymb}
+\usepackage{natbib}
+\usepackage{har2nat}
+\usepackage{rotating}
+\usepackage[breaklinks]{hyperref}
+\usepackage{subfig} % apparently subfig is the one to use not subfigure
+\usepackage{appendix}
+\usepackage{tipa}
+\usepackage{clrscode}
+\usepackage{setspace}
+\usepackage[absolute]{textpos} 
+
+
 \begin{document}
+	
+\setlength{\TPHorizModule}{200mm} 
+\setlength{\TPVertModule}{100mm} 
+\textblockorigin{61mm}{19mm}
+	
+%%%%% thanks alex mclean for super-useful onscreen reading tip:
+%\usepackage[top=0.1in, bottom=0.1in, left=0.3in, right=0.3in, paperwidth=11in, paperheight=7in]{geometry} % activate for ONSCREEN reading shape AT HOME
+%\usepackage[top=0.1in, bottom=0.1in, left=0.3in, right=0.3in, paperwidth=11in, paperheight=8.5in]{geometry} % activate for ONSCREEN reading shape AT WORK
 
-\frontmatter
+\doublespacing{}
 
-\author{Paulo Esteban Chiliguano Torres}
+% numbering starts from here:
+\pagenumbering{arabic}
+
+% titlepage stuff
 \title{Hybrid music recommender using content-based and social information}
-\qualification{Master of Science} 
+\author{Paulo Esteban Chiliguano Torres \\
+	\\
+	Project report 2015\\
+	\\
+	School of Electronic Engineering and Computer Science\\
+	Queen Mary University of London
+}
 
+\date{2015}
+
+% \frontmatter
+% \author{Paulo Esteban Chiliguano Torres}
+% \title{Hybrid music recommender using content-based and social information}
+% \qualification{Master of Science} 
 \maketitle
-\begin{summary}
-.
-\end{summary}
+
+% \include{acknowledgements/acknowledgements}
+
+% \begin{summary}
+% \end{summary}
+\include{abstract/abstract}
+
+\setcounter{page}{3}
 
 \tableofcontents
 
 \listoffigures
 
+\listoftables
+
 % could also have a \listoftables, but this example doesn't include any
 
-\begin{acknowledgements}
-.
-\end{acknowledgements}
+%\mainmatter
+% Start the main context
+\include{chapter1/introduction}
 
-\mainmatter
-% Start the main context
-\include{ch1/ch1}
-\include{ch2/ch2}
-\include{ch3/ch3}
-\include{ch4/ch4} 
-\include{ch5/ch5}
-\include{ch6/ch6}
+\include{chapter2/background}
 
-\bibliographystyle{plain}
-\bibliography{chiliguano_msc_finalproject}
+\include{chapter3/ch3}
 
-\backmatter
+\include{chapter4/evaluation}
+
+\include{chapter5/results}
+
+\include{chapter6/conclusions}
+
+\bibliographystyle{agsm}
+\bibliography{references}
+
+%\backmatter
 
 \end{document}
\ No newline at end of file
--- a/Report/chiliguano_msc_finalproject.toc	Tue Jul 28 21:14:27 2015 +0100
+++ b/Report/chiliguano_msc_finalproject.toc	Tue Aug 04 12:13:47 2015 +0100
@@ -1,27 +1,31 @@
-\contentsline {chapter}{\numberline {1}Introduction}{7}
-\contentsline {section}{\numberline {1.1}Outline of the thesis}{8}
-\contentsline {chapter}{\numberline {2}Background}{9}
-\contentsline {section}{\numberline {2.1}Recommender Systems}{9}
-\contentsline {subsection}{\numberline {2.1.1}Content-based Recommender Systems}{9}
-\contentsline {subsection}{\numberline {2.1.2}Collaborative filtering Recommender System}{9}
-\contentsline {subsection}{\numberline {2.1.3}Hybrid Recommender Systems}{10}
-\contentsline {section}{\numberline {2.2}Online Social Networks}{10}
-\contentsline {subsection}{\numberline {2.2.1}APIs}{10}
-\contentsline {section}{\numberline {2.3}Data Fusion Techniques}{10}
-\contentsline {chapter}{\numberline {3}Main contribution}{11}
-\contentsline {section}{\numberline {3.1}Methods}{11}
-\contentsline {subsection}{\numberline {3.1.1}Content based modelling}{11}
-\contentsline {subsection}{\numberline {3.1.2}Collaborative filtering}{11}
-\contentsline {section}{\numberline {3.2}Algorithms}{11}
-\contentsline {subsection}{\numberline {3.2.1}Deep Belief Networks}{11}
-\contentsline {subsubsection}{Convolutional Deep Belief Network (CDBN)}{11}
-\contentsline {chapter}{\numberline {4}Experiments}{12}
-\contentsline {section}{\numberline {4.1}Evaluation for recommender systems}{12}
-\contentsline {subsection}{\numberline {4.1.1}Types of experiments}{12}
-\contentsline {section}{\numberline {4.2}Evaluation settings}{13}
-\contentsline {subsection}{\numberline {4.2.1}Dataset}{13}
-\contentsline {subsection}{\numberline {4.2.2}Evaluation measures}{13}
-\contentsline {subsection}{\numberline {4.2.3}Experimentation aims}{13}
-\contentsline {chapter}{\numberline {5}Results}{14}
-\contentsline {chapter}{\numberline {6}Conclusion}{15}
-\contentsline {chapter}{Bibliography}{16}
+\contentsline {chapter}{\numberline {1}Introduction}{7}{chapter.1}
+\contentsline {section}{\numberline {1.1}Outline of the thesis}{8}{section.1.1}
+\contentsline {chapter}{\numberline {2}Background}{9}{chapter.2}
+\contentsline {section}{\numberline {2.1}Recommender Systems}{9}{section.2.1}
+\contentsline {subsection}{\numberline {2.1.1}Collaborative filtering (CF)}{9}{subsection.2.1.1}
+\contentsline {subsection}{\numberline {2.1.2}Content-based methods}{10}{subsection.2.1.2}
+\contentsline {subsection}{\numberline {2.1.3}Hybrid methods}{10}{subsection.2.1.3}
+\contentsline {section}{\numberline {2.2}Online Social Networks}{10}{section.2.2}
+\contentsline {section}{\numberline {2.3}Deep Learning}{11}{section.2.3}
+\contentsline {subsection}{\numberline {2.3.1}Convolutional Neural Networks (CNN)}{11}{subsection.2.3.1}
+\contentsline {section}{\numberline {2.4}Estimation of Distribution Algorithms (EDAs)}{11}{section.2.4}
+\contentsline {chapter}{\numberline {3}Methodology}{12}{chapter.3}
+\contentsline {section}{\numberline {3.1}Data collection}{12}{section.3.1}
+\contentsline {subsection}{\numberline {3.1.1}Taste profile subset filtering}{12}{subsection.3.1.1}
+\contentsline {subsection}{\numberline {3.1.2}Audio samples collection}{12}{subsection.3.1.2}
+\contentsline {subsection}{\numberline {3.1.3}Log-mel spectrograms}{12}{subsection.3.1.3}
+\contentsline {section}{\numberline {3.2}Algorithms}{12}{section.3.2}
+\contentsline {subsection}{\numberline {3.2.1}CNN implementation}{12}{subsection.3.2.1}
+\contentsline {subsubsection}{Genre classification}{12}{section*.4}
+\contentsline {subsection}{\numberline {3.2.2}Continuous Bayesian EDA}{12}{subsection.3.2.2}
+\contentsline {subsection}{\numberline {3.2.3}EDA-based hybrid recommender}{12}{subsection.3.2.3}
+\contentsline {chapter}{\numberline {4}Experiments}{13}{chapter.4}
+\contentsline {section}{\numberline {4.1}Evaluation for recommender systems}{13}{section.4.1}
+\contentsline {subsection}{\numberline {4.1.1}Types of experiments}{13}{subsection.4.1.1}
+\contentsline {section}{\numberline {4.2}Evaluation settings}{14}{section.4.2}
+\contentsline {subsection}{\numberline {4.2.1}Dataset}{14}{subsection.4.2.1}
+\contentsline {subsection}{\numberline {4.2.2}Evaluation measures}{15}{subsection.4.2.2}
+\contentsline {subsection}{\numberline {4.2.3}Experimentation aims}{15}{subsection.4.2.3}
+\contentsline {chapter}{\numberline {5}Results}{16}{chapter.5}
+\contentsline {chapter}{\numberline {6}Conclusion}{17}{chapter.6}
+\contentsline {section}{\numberline {6.1}Future work}{17}{section.6.1}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Report/references.bib	Tue Aug 04 12:13:47 2015 +0100
@@ -0,0 +1,165 @@
+@incollection{Lops2011,
+	year={2011},
+	isbn={978-0-387-85819-7},
+	booktitle={Recommender Systems Handbook},
+	editor={Ricci, Francesco and Rokach, Lior and Shapira, Bracha and Kantor, Paul B.},
+	doi={10.1007/978-0-387-85820-3_3},
+	title={Content-based Recommender Systems: State of the Art and Trends},
+	url={http://dx.doi.org/10.1007/978-0-387-85820-3\_3},
+	publisher={Springer US},
+	author={Lops, Pasquale and de Gemmis, Marco and Semeraro, Giovanni},
+	pages={73-105},
+	language={English}
+}
+
+@ARTICLE{Burke2002331,
+	author={Burke, R.},
+	title={Hybrid recommender systems: Survey and experiments},
+	journal={User Modelling and User-Adapted Interaction},
+	year={2002},
+	volume={12},
+	number={4},
+	pages={331-370},
+	doi={10.1023/A:1021240730564},
+	note={cited By 0},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036959356\&partnerID=40\&md5=28885a102109be826507abc2435117a7},
+	document_type={Article},
+	source={Scopus},
+}
+
+@ARTICLE{Yoshii2008435,
+	author={Yoshii, K. and Goto, M. and Komatani, K. and Ogata, T. and Okuno, H.G.},
+	title={An efficient hybrid music recommender system using an incrementally trainable probabilistic generative model},
+	journal={IEEE Transactions on Audio, Speech and Language Processing},
+	year={2008},
+	volume={16},
+	number={2},
+	pages={435-447},
+	doi={10.1109/TASL.2007.911503},
+	art_number={4432655},
+	note={cited By 0},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-39649112098\&partnerID=40\&md5=6827f82844ae1da58a6fa95caf5092d9},
+	document_type={Article},
+	source={Scopus},
+}
+
+
+@article{JCC4:JCC4393,
+author = {boyd, danah m. and Ellison, Nicole B.},
+title = {Social Network Sites: Definition, History, and Scholarship},
+journal = {Journal of Computer-Mediated Communication},
+volume = {13},
+number = {1},
+publisher = {Blackwell Publishing Inc},
+issn = {1083-6101},
+url = {http://dx.doi.org/10.1111/j.1083-6101.2007.00393.x},
+doi = {10.1111/j.1083-6101.2007.00393.x},
+pages = {210--230},
+year = {2007},
+}
+
+@ARTICLE{Castanedo2013,
+	author={Castanedo, F.},
+	title={A review of data fusion techniques},
+	journal={The Scientific World Journal},
+	year={2013},
+	volume={2013},
+	doi={10.1155/2013/704504},
+	art_number={704504},
+	note={cited By 0},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84888882639\&partnerID=40\&md5=827fabc750db24f662fdae1c798f2507},
+	document_type={Review},
+	source={Scopus},
+}
+
+
+@CONFERENCE{Lee20091096,
+	author={Lee, H. and Yan, L. and Pham, P. and Ng, A.Y.},
+	title={Unsupervised feature learning for audio classification using convolutional deep belief networks},
+	journal={Advances in Neural Information Processing Systems 22 - Proceedings of the 2009 Conference},
+	year={2009},
+	pages={1096-1104},
+	note={cited By 0},
+	url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84863380535\&partnerID=40\&md5=e872a6227c816850167f91bb2d41d8b7},
+	document_type={Conference Paper},
+	source={Scopus},
+}
+
+
+@TechReport {export:115396,
+abstract     = {<p>Recommender systems are now popular both commercially and in the research
+                community, where many approaches have been suggested for providing
+                recommendations. In many cases a system designer that wishes to employ a
+                recommendation system must choose between a set of candidate approaches.  A first
+                step towards selecting an appropriate algorithm is to decide which properties of
+                the application to focus upon when making this choice.  Indeed, recommendation
+                systems have a variety of properties that may affect user experience, such as
+                accuracy, robustness, scalability, and so forth. In this paper we discuss how to
+                compare recommenders based on a set of properties that are relevant for the
+                application. We focus on comparative studies, where a few algorithms are compared
+                using some evaluation metric, rather than absolute benchmarking of algorithms. We
+                describe experimental settings appropriate for making choices between algorithms.
+                We review three types of experiments, starting with an offline setting, where
+                recommendation approaches are compared without user interaction, then reviewing
+                user studies, where a small group of subjects experiment with the system and
+                report on the experience, and finally describe large scale online experiments,
+                where real user populations interact with the system. In each of these cases we
+                describe types of questions that can be answered, and suggest protocols for
+                experimentation. We also discuss how to draw trustworthy conclusions from the
+                conducted experiments. We then review a large set of properties, and explain how
+                to evaluate systems given relevant properties. We also survey a large set of
+                evaluation metrics in the context of the property that they evaluate.</p>},
+author       = {Guy Shani and Asela Gunawardana},
+month        = {November},
+number       = {MSR-TR-2009-159},
+publisher    = {Microsoft Research},
+title        = {Evaluating Recommender Systems},
+url          = {http://research.microsoft.com/apps/pubs/default.aspx?id=115396},
+year         = {2009},
+}
+
+@phdthesis {1242,
+	title = {Music Recommendation and Discovery in the Long Tail},
+	year = {2008},
+	school = {Universitat Pompeu Fabra},
+	address = {Barcelona},
+	abstract = {<p class="small">
+Music consumption is biased towards a few popular artists. For instance, in 2007 only 1\% of
+all digital tracks accounted for 80\% of all sales. Similarly, 1,000 albums accounted for 50\%
+of all album sales, and 80\% of all albums sold were purchased less than 100 times. There is
+a need to assist people to filter, discover, personalise and recommend from the huge amount
+of music content available along the Long Tail.
+</p>
+<p class="small">
+Current music recommendation algorithms try to
+accurately predict what people demand to listen to. However, quite
+often these algorithms tend to recommend popular -or well-known to the
+user- music, decreasing the effectiveness of the recommendations. These
+approaches focus on improving the accuracy of the recommendations. That
+is, try to make
+accurate predictions about what a user could listen to, or buy next,
+independently of how
+useful to the user could be the provided recommendations.
+</p>
+<p class="small">
+In this Thesis we stress the importance of the user{\textquoteright}s
+perceived quality of the recommendations. We model the Long Tail curve
+of artist popularity to predict -potentially-
+interesting and unknown music, hidden in the tail of the popularity
+curve. Effective recommendation systems should promote novel and
+relevant material (non-obvious recommendations), taken primarily from
+the tail of a popularity distribution.
+</p>
+<p class="small">
+The main contributions of this Thesis are: <em>(i)</em> a novel network-based approach for
+recommender systems, based on the analysis of the item (or user) similarity graph, and the
+popularity of the items, <em>(ii)</em> a user-centric evaluation that measures the user{\textquoteright}s relevance
+and novelty of the recommendations, and <em>(iii)</em> two prototype systems that implement the
+ideas derived from the theoretical work. Our findings have significant implications for
+recommender systems that assist users to explore the Long Tail, digging for content they
+might like.
+</p>
+},
+	url = {http://mtg.upf.edu/static/media/PhD\_ocelma.pdf},
+	author = {Celma, {\`O}.},
+}