{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/mariapanteli/anaconda/lib/python2.7/site-packages/librosa/core/audio.py:33: UserWarning: Could not import scikits.samplerate. Falling back to scipy.signal\n",
      "  warnings.warn('Could not import scikits.samplerate. '\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "import scripts.load_dataset as load_dataset\n",
    "import scripts.map_and_average as mapper\n",
    "import scripts.results_classification as results_class\n",
    "import scripts.results as results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "WIN_SIZE = 8  # placeholder value: set to the analysis window size used when the features were extracted\n",
    "OUTPUT_FILES = ['/import/c4dm-04/mariap/train_data_melodia_'+str(WIN_SIZE)+'.pickle',\n",
    "                '/import/c4dm-04/mariap/val_data_melodia_'+str(WIN_SIZE)+'.pickle',\n",
    "                '/import/c4dm-04/mariap/test_data_melodia_'+str(WIN_SIZE)+'.pickle']\n",
    "n_iters = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "ename": "IOError",
     "evalue": "File data/metadata_BLSM_language_all.csv does not exist",
     "output_type": "error",
     "traceback": [
      "IOError                                   Traceback (most recent call last)",
      "(traceback condensed) pd.read_csv failed inside load_dataset.sample_dataset",
      "IOError: File data/metadata_BLSM_language_all.csv does not exist"
     ]
    }
   ],
   "source": [
    "for n in range(n_iters):\n",
    "    print \"iteration %d\" % n\n",
    "    df = load_dataset.sample_dataset(csv_file=load_dataset.METADATA_FILE)\n",
    "    load_dataset.OUTPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for\n",
    "                                 output_file in OUTPUT_FILES]\n",
    "    load_dataset.features_for_train_test_sets(df, write_output=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "for n in range(n_iters):\n",
    "    print \"iteration %d\" % n\n",
    "    \n",
    "    print \"mapping...\"\n",
    "    mapper.INPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for\n",
    "                          output_file in OUTPUT_FILES]\n",
    "    _, _, ldadata_list, _, _, Y, Yaudio = mapper.lda_map_and_average_frames(min_variance=0.99)\n",
    "    X = np.concatenate(ldadata_list)\n",
    "    \n",
    "    # classification and confusion\n",
    "    print \"classifying...\"\n",
    "    traininds, testinds = results_class.get_train_test_indices()\n",
    "    X_train, Y_train, X_test, Y_test = results_class.get_train_test_sets(X, Y, traininds, testinds)\n",
    "    accuracy, _ = results_class.confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=False, plots=False)\n",
    "    print accuracy\n",
    "    \n",
    "    # outliers (assuming get_outliers_df and print_most_least_outliers_topN are defined in scripts.results)\n",
    "    print \"detecting outliers...\"\n",
    "    ddf = results.load_metadata(Yaudio, metadata_file=load_dataset.METADATA_FILE)\n",
    "    df_global, threshold, MD = results.get_outliers_df(X, Y, chi2thr=0.999)\n",
    "    results.print_most_least_outliers_topN(df_global, N=10)\n",
    "    \n",
    "    # write output\n",
    "    print \"writing file\"\n",
    "    df_global.to_csv('../data/outliers_'+str(n)+'.csv', index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}