{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import pickle\n",
    "\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n",
    "\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "import scripts.map_and_average as mapper\n",
    "import scripts.util_feature_learning as util_feature_learning"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "trainset, valset, testset = mapper.load_train_val_test_sets()\n",
    "traindata, trainlabels, trainaudiolabels = trainset\n",
    "valdata, vallabels, valaudiolabels = valset\n",
    "testdata, testlabels, testaudiolabels = testset\n",
    "labels = np.concatenate((trainlabels, vallabels, testlabels)).ravel()\n",
    "audiolabels = np.concatenate((trainaudiolabels, valaudiolabels, testaudiolabels)).ravel()\n",
    "print traindata.shape, valdata.shape, testdata.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## explain LDA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "min_variance = 0.99\n",
    "feat_labels, feat_inds = mapper.get_feat_inds(n_dim=traindata.shape[1])\n",
    "for i in range(len(feat_inds)):\n",
    "    print \"mapping \" + feat_labels[i]\n",
    "    inds = feat_inds[i]\n",
    "    ssm_feat = util_feature_learning.Transformer()\n",
    "    if min_variance is not None:\n",
    "        ssm_feat.fit_data(traindata[:, inds], trainlabels, n_components=len(inds), pca_only=True)\n",
    "        n_components = np.where(ssm_feat.pca_transformer.explained_variance_ratio_.cumsum()>min_variance)[0][0]+1\n",
    "        print n_components, len(inds)\n",
    "    ssm_feat.fit_lda_data(traindata[:, inds], trainlabels, n_components=n_components)\n",
    "\n",
    "    WW = ssm_feat.lda_transformer.scalings_\n",
    "    plt.figure()\n",
    "    plt.imshow(WW[:, :n_components].T, aspect='auto')\n",
    "    plt.colorbar()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## explain classifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_list, Y, Yaudio = pickle.load(open('../data/lda_data_melodia_8.pickle','rb'))\n",
    "Xrhy, Xmel, Xmfc, Xchr = X_list\n",
    "X = np.concatenate((Xrhy, Xmel, Xmfc, Xchr), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# TODO: X_train/Y_train/X_test/Y_test are never defined in this notebook --\n",
    "# create the train/test split (e.g. from X, Y above) before running this cell.\n",
    "ssm_feat.classify_and_save(X_train, Y_train, X_test, Y_test, transform_label=\" \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def components_plot(lda_transformer, XX, n_comp=42, figurename=None):\n",
    "    WW=lda_transformer.scalings_\n",
    "    Xlda=lda_transformer.transform(XX)\n",
    "    Xww=np.dot(XX, WW[:, :n_comp])\n",
    "    plt.figure()\n",
    "    plt.imshow(Xlda - Xww, aspect='auto')\n",
    "    plt.figure()\n",
    "    plt.imshow(Xlda, aspect='auto')\n",
    "    plt.figure()\n",
    "    plt.imshow(Xww, aspect='auto')\n",
    "    plt.figure()\n",
    "    plt.imshow(WW[:, :n_comp], aspect='auto') # this explains the weights up to n_components=64\n",
    "    if figurename is not None:\n",
    "        plt.savefig(figurename)\n",
    "\n",
    "XX = traindata[:, inds]\n",
    "components_plot(ssm_feat.lda_transformer, XX, n_comp=n_components)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}