Mercurial > hg > plosone_underreview
comparison notebooks/sensitivity_experiment.ipynb @ 15:9847b954c217 branch-tests
added sensitivity experiment
author | Maria Panteli <m.x.panteli@gmail.com> |
---|---|
date | Tue, 12 Sep 2017 19:03:48 +0100 |
parents | |
children | 2e487b9c0a7b |
comparison
equal
deleted
inserted
replaced
14:088b5547e094 | 15:9847b954c217 |
---|---|
1 { | |
2 "cells": [ | |
3 { | |
4 "cell_type": "code", | |
5 "execution_count": 1, | |
6 "metadata": { | |
7 "collapsed": false | |
8 }, | |
9 "outputs": [ | |
10 { | |
11 "name": "stderr", | |
12 "output_type": "stream", | |
13 "text": [ | |
14 "/Users/mariapanteli/anaconda/lib/python2.7/site-packages/librosa/core/audio.py:33: UserWarning: Could not import scikits.samplerate. Falling back to scipy.signal\n", | |
15 " warnings.warn('Could not import scikits.samplerate. '\n" | |
16 ] | |
17 } | |
18 ], | |
19 "source": [ | |
20 "import numpy as np\n", | |
21 "\n", | |
22 "%matplotlib inline\n", | |
23 "import matplotlib.pyplot as plt\n", | |
24 "\n", | |
25 "%load_ext autoreload\n", | |
26 "%autoreload 2\n", | |
27 "\n", | |
28 "import sys\n", | |
29 "sys.path.append('../')\n", | |
30 "import scripts.load_dataset as load_dataset\n", | |
31 "import scripts.map_and_average as mapper\n", | |
32 "import scripts.results_classification as results_class\n", | |
33 "import scripts.results as results" | |
34 ] | |
35 }, | |
36 { | |
37 "cell_type": "code", | |
38 "execution_count": 2, | |
39 "metadata": { | |
40 "collapsed": true | |
41 }, | |
42 "outputs": [], | |
43 "source": [ | |
44 "WIN_SIZE = 8  # FIXME(review): WIN_SIZE was referenced below but never defined in this notebook -- it only worked via leaked kernel state; confirm the window-size value used when the pickles were produced\n", | |
44 "OUTPUT_FILES = ['/import/c4dm-04/mariap/train_data_melodia_'+str(WIN_SIZE)+'.pickle', \n", | |
45 "                '/import/c4dm-04/mariap/val_data_melodia_'+str(WIN_SIZE)+'.pickle', \n", | |
46 "                '/import/c4dm-04/mariap/test_data_melodia_'+str(WIN_SIZE)+'.pickle']\n", | |
47 "n_iters = 10  # number of sensitivity-experiment repetitions\n" | |
48 ] | |
49 }, | |
50 { | |
51 "cell_type": "code", | |
52 "execution_count": 5, | |
53 "metadata": { | |
54 "collapsed": false | |
55 }, | |
56 "outputs": [ | |
57 { | |
58 "ename": "IOError", | |
59 "evalue": "File data/metadata_BLSM_language_all.csv does not exist", | |
60 "output_type": "error", | |
61 "traceback": [ | |
62 "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", | |
63 "\u001b[0;31mIOError\u001b[0m Traceback (most recent call last)", | |
64 "\u001b[0;32m<ipython-input-5-8d1030af886f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mn\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn_iters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msample_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcsv_file\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mload_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMETADATA_FILE\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m load_dataset.OUTPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n\u001b[1;32m 4\u001b[0m output_file in load_dataset.OUTPUT_FILES]\n\u001b[1;32m 5\u001b[0m \u001b[0mload_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeatures_for_train_test_sets\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwrite_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
65 "\u001b[0;32m/Users/mariapanteli/Documents/QMUL/Code/MyPythonCode/plosone_underreview/scripts/load_dataset.py\u001b[0m in \u001b[0;36msample_dataset\u001b[0;34m(csv_file)\u001b[0m\n\u001b[1;32m 130\u001b[0m \u001b[0mThe\u001b[0m \u001b[0mmetadata\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mselected\u001b[0m \u001b[0msubset\u001b[0m \u001b[0mof\u001b[0m \u001b[0mtracks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 131\u001b[0m \"\"\"\n\u001b[0;32m--> 132\u001b[0;31m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcsv_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 133\u001b[0m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mutil_filter_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mremove_missing_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0msubset_idx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msubset_labels\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'Country'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_values\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
66 "\u001b[0;32m/Users/mariapanteli/anaconda/lib/python2.7/site-packages/pandas/io/parsers.pyc\u001b[0m in \u001b[0;36mparser_f\u001b[0;34m(filepath_or_buffer, sep, dialect, compression, doublequote, escapechar, quotechar, quoting, skipinitialspace, lineterminator, header, index_col, names, prefix, skiprows, skipfooter, skip_footer, na_values, na_fvalues, true_values, false_values, delimiter, converters, dtype, usecols, engine, delim_whitespace, as_recarray, na_filter, compact_ints, use_unsigned, low_memory, buffer_lines, warn_bad_lines, error_bad_lines, keep_default_na, thousands, comment, decimal, parse_dates, keep_date_col, dayfirst, date_parser, memory_map, float_precision, nrows, iterator, chunksize, verbose, encoding, squeeze, mangle_dupe_cols, tupleize_cols, infer_datetime_format, skip_blank_lines)\u001b[0m\n\u001b[1;32m 463\u001b[0m skip_blank_lines=skip_blank_lines)\n\u001b[1;32m 464\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 465\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 466\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 467\u001b[0m \u001b[0mparser_f\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
67 "\u001b[0;32m/Users/mariapanteli/anaconda/lib/python2.7/site-packages/pandas/io/parsers.pyc\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 239\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0;31m# Create the parser.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 241\u001b[0;31m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTextFileReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 242\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mchunksize\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
68 "\u001b[0;32m/Users/mariapanteli/anaconda/lib/python2.7/site-packages/pandas/io/parsers.pyc\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 555\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'has_index_names'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'has_index_names'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 556\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 557\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 558\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 559\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_options_with_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
69 "\u001b[0;32m/Users/mariapanteli/anaconda/lib/python2.7/site-packages/pandas/io/parsers.pyc\u001b[0m in \u001b[0;36m_make_engine\u001b[0;34m(self, engine)\u001b[0m\n\u001b[1;32m 692\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mengine\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'c'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 693\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'c'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 694\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mCParserWrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 695\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 696\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'python'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
70 "\u001b[0;32m/Users/mariapanteli/anaconda/lib/python2.7/site-packages/pandas/io/parsers.pyc\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, src, **kwds)\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'allow_leading_cols'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex_col\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1060\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1061\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_parser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTextReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msrc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1062\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1063\u001b[0m \u001b[0;31m# XXX\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
71 "\u001b[0;32mpandas/parser.pyx\u001b[0m in \u001b[0;36mpandas.parser.TextReader.__cinit__ (pandas/parser.c:3163)\u001b[0;34m()\u001b[0m\n", | |
72 "\u001b[0;32mpandas/parser.pyx\u001b[0m in \u001b[0;36mpandas.parser.TextReader._setup_parser_source (pandas/parser.c:5779)\u001b[0;34m()\u001b[0m\n", | |
73 "\u001b[0;31mIOError\u001b[0m: File data/metadata_BLSM_language_all.csv does not exist" | |
74 ] | |
75 } | |
76 ], | |
77 "source": [ | |
78 "for n in range(n_iters):\n", | |
79 "    print \"iteration %d\" % n\n", | |
80 "    df = load_dataset.sample_dataset(csv_file=load_dataset.METADATA_FILE)  # draw a fresh track subset each iteration (presumably a random sample -- see scripts/load_dataset.py)\n", | |
81 "    load_dataset.OUTPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n", | |
82 "                                 output_file in OUTPUT_FILES]  # per-iteration pickle names derived from the base OUTPUT_FILES constant, so the '_n' suffix does not accumulate across iterations\n", | |
83 "    load_dataset.features_for_train_test_sets(df, write_output=True)  # extract features and write this iteration's train/val/test pickles" | |
84 ] | |
85 }, | |
86 { | |
87 "cell_type": "code", | |
88 "execution_count": null, | |
89 "metadata": { | |
90 "collapsed": true | |
91 }, | |
92 "outputs": [], | |
93 "source": [ | |
94 "for n in range(n_iters):\n", | |
95 "    print \"iteration %d\" % n\n", | |
96 "    \n", | |
97 "    print \"mapping...\"\n", | |
98 "    mapper.INPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n", | |
99 "                          output_file in OUTPUT_FILES]  # point the mapper at iteration n's pickles (derived from the base OUTPUT_FILES constant)\n", | |
100 "    _, _, ldadata_list, _, _, Y, Yaudio = mapper.lda_map_and_average_frames(min_variance=0.99)\n", | |
101 "    X = np.concatenate(ldadata_list)  # stack the LDA-projected frame sets into one feature matrix\n", | |
102 "    \n", | |
103 "    # classification and confusion\n", | |
104 "    print \"classifying...\"\n", | |
105 "    traininds, testinds = results_class.get_train_test_indices()\n", | |
106 "    X_train, Y_train, X_test, Y_test = results_class.get_train_test_sets(X, Y, traininds, testinds)\n", | |
107 "    accuracy, _ = results_class.confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=False, plots=False)\n", | |
108 "    print accuracy\n", | |
109 "    \n", | |
110 "    # outliers\n", | |
111 "    print \"detecting outliers...\"\n", | |
112 "    ddf = results.load_metadata(Yaudio, metadata_file=load_dataset.METADATA_FILE)  # NOTE(review): ddf is never used below -- confirm whether get_outliers_df is meant to take it\n", | |
113 "    df_global, threshold, MD = results.get_outliers_df(X, Y, chi2thr=0.999)  # was unqualified get_outliers_df -> NameError on a fresh kernel; qualified with the results module\n", | |
114 "    results.print_most_least_outliers_topN(df_global, N=10)  # was unqualified -> NameError on a fresh kernel; qualified with the results module\n", | |
115 "    \n", | |
116 "    # write output\n", | |
117 "    print \"writing file\"\n", | |
118 "    df_global.to_csv('../data/outliers_'+str(n)+'.csv', index=False)" | |
119 ] | |
120 } | |
121 ], | |
122 "metadata": { | |
123 "kernelspec": { | |
124 "display_name": "Python 2", | |
125 "language": "python", | |
126 "name": "python2" | |
127 }, | |
128 "language_info": { | |
129 "codemirror_mode": { | |
130 "name": "ipython", | |
131 "version": 2 | |
132 }, | |
133 "file_extension": ".py", | |
134 "mimetype": "text/x-python", | |
135 "name": "python", | |
136 "nbconvert_exporter": "python", | |
137 "pygments_lexer": "ipython2", | |
138 "version": "2.7.12" | |
139 } | |
140 }, | |
141 "nbformat": 4, | |
142 "nbformat_minor": 0 | |
143 } |