diff notebooks/sensitivity_experiment.ipynb @ 42:90f8a2ea6f6f branch-tests

notebook results and load_features minor edits
author mpanteli <m.x.panteli@gmail.com>
date Fri, 15 Sep 2017 16:17:17 +0100
parents e4736064d282
children 081ff4ea7da7
--- a/notebooks/sensitivity_experiment.ipynb	Fri Sep 15 12:27:11 2017 +0100
+++ b/notebooks/sensitivity_experiment.ipynb	Fri Sep 15 16:17:17 2017 +0100
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 58,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [
     {
@@ -16,6 +16,7 @@
    ],
    "source": [
     "import numpy as np\n",
+    "import pandas as pd\n",
     "\n",
     "%matplotlib inline\n",
     "import matplotlib.pyplot as plt\n",
@@ -27,19 +28,20 @@
     "sys.path.append('../')\n",
     "import scripts.load_dataset as load_dataset\n",
     "import scripts.map_and_average as mapper\n",
-    "import scripts.classification\n",
+    "import scripts.classification as classification\n",
     "import scripts.outliers as outliers"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 46,
-   "metadata": {},
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "OUTPUT_FILES = load_dataset.OUTPUT_FILES\n",
-    "n_iters = 10\n",
-    "df = load_dataset.sample_dataset(csv_file=load_dataset.METADATA_FILE)"
+    "n_iters = 10"
    ]
   },
   {
@@ -59,6 +61,7 @@
     }
    ],
    "source": [
+    "df = load_dataset.sample_dataset(csv_file=load_dataset.METADATA_FILE)\n",
     "df.shape"
    ]
   },
@@ -4612,11 +4615,83 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "iteration 0\n",
+      "mapping...\n",
+      "/import/c4dm-04/mariap/train_data_melodia_8_0.pickle\n",
+      "(203219, 840) (68100, 840) (67143, 840)\n",
+      "mapping rhy\n",
+      "training with PCA transform...\n",
+      "variance explained 1.0\n",
+      "140 400\n",
+      "training with PCA transform...\n",
+      "variance explained 0.990203912455\n",
+      "training with LDA transform...\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/homes/mp305/anaconda/lib/python2.7/site-packages/sklearn/utils/validation.py:526: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n",
+      "  y = column_or_1d(y, warn=True)\n",
+      "/homes/mp305/anaconda/lib/python2.7/site-packages/sklearn/discriminant_analysis.py:455: UserWarning: The priors do not sum to 1. Renormalizing\n",
+      "  UserWarning)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "variance explained 1.0\n",
+      "transform test data...\n",
+      "mapping mel\n",
+      "training with PCA transform...\n",
+      "variance explained 1.0\n",
+      "214 240\n",
+      "training with PCA transform...\n",
+      "variance explained 0.990094273777\n",
+      "training with LDA transform...\n",
+      "variance explained 1.0\n",
+      "transform test data...\n",
+      "mapping mfc\n",
+      "training with PCA transform...\n",
+      "variance explained 1.0\n",
+      "39 80\n",
+      "training with PCA transform...\n",
+      "variance explained 0.9914399357\n",
+      "training with LDA transform...\n",
+      "variance explained 0.941390777379\n",
+      "transform test data...\n",
+      "mapping chr\n",
+      "training with PCA transform...\n",
+      "variance explained 1.0\n",
+      "70 120\n",
+      "training with PCA transform...\n",
+      "variance explained 0.990511935176\n",
+      "training with LDA transform...\n",
+      "variance explained 0.953613938607\n",
+      "transform test data...\n"
+     ]
+    },
+    {
+     "ename": "ValueError",
+     "evalue": "all the input array dimensions except for the concatenation axis must match exactly",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "\u001b[0;32m<ipython-input-3-971892d5bd8d>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      6\u001b[0m                                  output_file in OUTPUT_FILES]\n\u001b[1;32m      7\u001b[0m     \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mldadata_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mYaudio\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmapper\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlda_map_and_average_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmin_variance\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.99\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m     \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconcatenate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mldadata_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     10\u001b[0m     \u001b[0;31m# classification and confusion\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;31mValueError\u001b[0m: all the input array dimensions except for the concatenation axis must match exactly"
+     ]
+    }
+   ],
    "source": [
     "for n in range(n_iters):\n",
     "    print \"iteration %d\" % n\n",
@@ -4625,7 +4700,7 @@
     "    mapper.INPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n",
     "                                 output_file in OUTPUT_FILES]\n",
     "    _, _, ldadata_list, _, _, Y, Yaudio = mapper.lda_map_and_average_frames(min_variance=0.99)\n",
-    "    X = np.concatenate(ldadata_list)\n",
+    "    X = np.concatenate(ldadata_list, axis=1)\n",
     "    \n",
     "    # classification and confusion\n",
     "    print \"classifying...\"\n",
@@ -4636,14 +4711,232 @@
     "    \n",
     "    # outliers\n",
     "    print \"detecting outliers...\"\n",
-    "    ddf = outliers.load_metadata(Yaudio, metadata_file=load_dataset.METADATA_FILE)\n",
-    "    df_global, threshold, MD = get_outliers_df(X, Y, chi2thr=0.999)\n",
-    "    print_most_least_outliers_topN(df_global, N=10)\n",
+    "    #ddf = outliers.load_metadata(Yaudio, metadata_file=load_dataset.METADATA_FILE)\n",
+    "    df_global, threshold, MD = outliers.get_outliers_df(X, Y, chi2thr=0.999)\n",
+    "    outliers.print_most_least_outliers_topN(df_global, N=10)\n",
     "    \n",
     "    # write output\n",
     "    print \"writing file\"\n",
     "    df_global.to_csv('../data/outliers_'+str(n)+'.csv', index=False)"
    ]
+  },
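+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "An aside on the mapping step: the log above shows each feature type being reduced with PCA (keeping ~99% of the variance) and then projected with a supervised LDA transform. Below is a minimal sketch of that idea, assuming scikit-learn 0.18+; the actual pipeline lives in `scripts/map_and_average.py` and may differ in detail."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from sklearn.decomposition import PCA\n",
+    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n",
+    "\n",
+    "# illustrative sketch only -- not the project's implementation;\n",
+    "# a float n_components asks PCA to keep enough components to\n",
+    "# explain that fraction of the variance\n",
+    "def pca_then_lda(X_train, y_train, X_test, min_variance=0.99):\n",
+    "    pca = PCA(n_components=min_variance).fit(X_train)\n",
+    "    # ravel() avoids the column-vector DataConversionWarning seen above\n",
+    "    lda = LinearDiscriminantAnalysis().fit(pca.transform(X_train), y_train.ravel())\n",
+    "    return lda.transform(pca.transform(X_train)), lda.transform(pca.transform(X_test))"
+   ]
+  },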
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "X = np.concatenate(ldadata_list, axis=1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(8089, 381)"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "X.shape"
+   ]
+  },
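+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Why `axis=1` fixes the ValueError above: each element of `ldadata_list` holds one feature type, with the same number of recordings (rows) but a different number of LDA components (columns), so the blocks can only be stacked side by side. A tiny sketch with made-up dimensions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# dummy stand-ins for the per-feature embeddings (rhy, mel, mfc, chr):\n",
+    "# equal row counts, differing column counts; the dimensions are made up\n",
+    "dummy_list = [np.zeros((8089, d)) for d in (140, 100, 39, 70)]\n",
+    "# np.concatenate(dummy_list) raises the ValueError seen above;\n",
+    "# axis=1 places the feature blocks side by side per recording\n",
+    "print(np.concatenate(dummy_list, axis=1).shape)"
+   ]
+  },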
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.176354062249\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/homes/mp305/anaconda/lib/python2.7/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
+      "  'precision', 'predicted', average, warn_for)\n"
+     ]
+    }
+   ],
+   "source": [
+    "#traininds, testinds = classification.get_train_test_indices()\n",
+    "traininds = np.arange(5000)\n",
+    "testinds = np.arange(len(X)-1600, len(X))\n",
+    "X_train, Y_train, X_test, Y_test = classification.get_train_test_sets(X, Y, traininds, testinds)\n",
+    "accuracy, _ = classification.confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=False, plots=False)\n",
+    "print accuracy"
+   ]
+  },
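+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The split above is a quick ad-hoc check (first 5000 rows to train, last 1600 to test). A hypothetical stratified alternative, assuming scikit-learn 0.18+ and that `Y` holds one country label per row of `X`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "# hypothetical alternative to the arange-based split above:\n",
+    "# stratify keeps the per-country proportions similar in both sets\n",
+    "idx = np.arange(len(X))\n",
+    "traininds, testinds = train_test_split(idx, test_size=0.2, stratify=np.ravel(Y), random_state=0)"
+   ]
+  },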
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "detecting outliers...\n",
+      "most outliers \n",
+      "            Country  Outliers\n",
+      "136        Botswana  0.590909\n",
+      "71      Ivory Coast  0.571429\n",
+      "86           Gambia  0.541667\n",
+      "43            Benin  0.538462\n",
+      "62             Fiji  0.466667\n",
+      "20         Pakistan  0.461538\n",
+      "65           Uganda  0.437500\n",
+      "14          Liberia  0.425000\n",
+      "78      El Salvador  0.424242\n",
+      "51   Western Sahara  0.421687\n",
+      "least outliers \n",
+      "            Country  Outliers\n",
+      "119         Denmark  0.000000\n",
+      "30      Afghanistan  0.000000\n",
+      "113         Iceland  0.000000\n",
+      "28       Tajikistan  0.000000\n",
+      "74   Czech Republic  0.000000\n",
+      "27      South Korea  0.000000\n",
+      "1         Lithuania  0.000000\n",
+      "15      Netherlands  0.014925\n",
+      "121          Poland  0.040000\n",
+      "134        Paraguay  0.043478\n"
+     ]
+    }
+   ],
+   "source": [
+    "print \"detecting outliers...\"\n",
+    "df_global, threshold, MD = outliers.get_outliers_df(X, Y, chi2thr=0.999)\n",
+    "outliers.print_most_least_outliers_topN(df_global, N=10)"
+   ]
+  },
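+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`get_outliers_df` is defined in `scripts/outliers.py`. As a rough sketch of the underlying idea (not the project's actual code): for roughly Gaussian features, squared Mahalanobis distances follow a chi-square distribution with degrees of freedom equal to the feature dimensionality, so `chi2thr=0.999` flags roughly the most extreme 0.1% under that model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from scipy.stats import chi2\n",
+    "\n",
+    "# illustrative sketch of chi-square-thresholded Mahalanobis distances\n",
+    "def mahalanobis_outliers(X, chi2thr=0.999):\n",
+    "    diff = X - X.mean(axis=0)\n",
+    "    inv_cov = np.linalg.pinv(np.cov(X, rowvar=False))  # pinv for stability\n",
+    "    MD = np.sqrt(np.einsum('ij,jk,ik->i', diff, inv_cov, diff))\n",
+    "    threshold = np.sqrt(chi2.ppf(chi2thr, df=X.shape[1]))\n",
+    "    return MD, threshold, MD > threshold"
+   ]
+  },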
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## correlation of outlier results"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's use Kendal correlation to compare the ranked list of countries sorted by most to least outliers.\n",
+    "<br> First load the ranked list of outlier countries.\n",
+    "<br> Sort by outlier percentage in descending order."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "ranked_countries = pd.DataFrame()\n",
+    "ranked_outliers = pd.DataFrame()\n",
+    "for n in range(n_iters):\n",
+    "    df_global = pd.read_csv('../data/outliers_'+str(n)+'.csv')\n",
+    "    df_global = df_global.sort_values('Outliers', axis=0, ascending=False, inplace=True)\n",
+    "    ranked_countries = pd.concat([ranked_countries, df_global['Country']], axis=1)\n",
+    "    ranked_outliers = pd.concat([ranked_outliers, df_global['Outliers']], axis=1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Remove countries with 0% outliers as these are in random (probably alphabetical) order."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "zero_idx = np.where(np.sum(ranked_outliers, axis=1)==0)[0]\n",
+    "first_zero_idx = np.min(zero_idx)\n",
+    "ranked_countries = ranked_countries.iloc[:first_zero_idx, :]\n",
+    "ranked_outliers = ranked_outliers.iloc[:first_zero_idx, :]\n",
+    "\n",
+    "print ranked_countries.head()\n",
+    "print ranked_outliers.head()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And now kendalltau correlation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "KendalltauResult(correlation=0.99999999999999989, pvalue=2.5428927239036995e-67)\n"
+     ]
+    }
+   ],
+   "source": [
+    "from scipy.stats import kendalltau\n",
+    "for i in range(len(ranked_countries)-1):\n",
+    "    for j in range(i+1, len(ranked_countries)):\n",
+    "        print kendalltau(ranked_countries.iloc[:, i], ranked_countries.iloc[:, j])"
+   ]
+  },
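+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick sanity check on what `kendalltau` does with these lists: it ranks the values it is given, so identical orderings give tau = 1 and each swapped pair lowers it (toy lists below, made up for illustration)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# toy example: identical orderings vs. one adjacent swap\n",
+    "a = ['Benin', 'Botswana', 'Fiji', 'Gambia']\n",
+    "b = ['Benin', 'Botswana', 'Gambia', 'Fiji']\n",
+    "print(kendalltau(a, a))  # correlation = 1.0\n",
+    "print(kendalltau(a, b))  # correlation = 2/3 (one discordant pair)"
+   ]
+  },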
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "SpearmanrResult(correlation=1.0, pvalue=0.0)"
+      ]
+     },
+     "execution_count": 34,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from scipy.stats import spearmanr\n",
+    "spearmanr(ranked_countries)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {