view notebooks/sensitivity_experiment.ipynb @ 19:0bba6f63f4fd branch-tests

some edits in notebook
author mpanteli <m.x.panteli@gmail.com>
date Wed, 13 Sep 2017 12:09:55 +0100
parents ed109218dd4b
children 206fdeff4641
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The autoreload extension is already loaded. To reload it, use:\n",
      "  %reload_ext autoreload\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "import scripts.load_dataset as load_dataset\n",
    "import scripts.map_and_average as mapper\n",
    "import scripts.classification\n",
    "import scripts.outliers as outliers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "OUTPUT_FILES = load_dataset.OUTPUT_FILES\n",
    "n_iters = 10\n",
    "df = load_dataset.sample_dataset(csv_file=load_dataset.METADATA_FILE)"
   ]
  },
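  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick look at the sampled metadata (added sketch, assuming `sample_dataset` returns a pandas DataFrame):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Added sketch: basic sanity check of the sampled metadata\n",
    "# (assumes df is a pandas DataFrame returned by load_dataset.sample_dataset).\n",
    "print df.shape\n",
    "print df.head()"
   ]
  },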
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "for n in range(n_iters):\n",
    "    print \"iteration %d\" % n\n",
    "    load_dataset.OUTPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n",
    "                                 output_file in OUTPUT_FILES]\n",
    "    load_dataset.features_for_train_test_sets(df, write_output=True)"
   ]
  },
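  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional sanity check (added sketch): confirm the per-iteration feature pickles were written before running the mapping and outlier steps below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "# Added sketch: report any missing per-iteration pickle files\n",
    "# (filenames follow the same pattern as in the loop above).\n",
    "for n in range(n_iters):\n",
    "    expected = [f.split('.pickle')[0] + '_' + str(n) + '.pickle' for f in OUTPUT_FILES]\n",
    "    missing = [f for f in expected if not os.path.exists(f)]\n",
    "    if missing:\n",
    "        print 'iteration %d missing: %s' % (n, missing)"
   ]
  },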
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "for n in range(n_iters):\n",
    "    print \"iteration %d\" % n\n",
    "    \n",
    "    print \"mapping...\"\n",
    "    mapper.INPUT_FILES = [output_file.split('.pickle')[0]+'_'+str(n)+'.pickle' for \n",
    "                                 output_file in OUTPUT_FILES]\n",
    "    _, _, ldadata_list, _, _, Y, Yaudio = mapper.lda_map_and_average_frames(min_variance=0.99)\n",
    "    X = np.concatenate(ldadata_list)\n",
    "    \n",
    "    # classification and confusion\n",
    "    print \"classifying...\"\n",
    "    traininds, testinds = classification.get_train_test_indices()\n",
    "    X_train, Y_train, X_test, Y_test = classification.get_train_test_sets(X, Y, traininds, testinds)\n",
    "    accuracy, _ = classification.confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=False, plots=False)\n",
    "    print accuracy\n",
    "    \n",
    "    # outliers\n",
    "    print \"detecting outliers...\"\n",
    "    ddf = outliers.load_metadata(Yaudio, metadata_file=load_dataset.METADATA_FILE)\n",
    "    df_global, threshold, MD = get_outliers_df(X, Y, chi2thr=0.999)\n",
    "    print_most_least_outliers_topN(df_global, N=10)\n",
    "    \n",
    "    # write output\n",
    "    print \"writing file\"\n",
    "    df_global.to_csv('../data/outliers_'+str(n)+'.csv', index=False)"
   ]
  }
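,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Aggregate the per-iteration outlier tables (added sketch; assumes the CSVs written above can be read back with pandas):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "# Added sketch: concatenate the outlier tables written above, tagging each row\n",
    "# with its iteration index, so variability across samples can be inspected.\n",
    "frames = []\n",
    "for n in range(n_iters):\n",
    "    frame = pd.read_csv('../data/outliers_' + str(n) + '.csv')\n",
    "    frame['iteration'] = n\n",
    "    frames.append(frame)\n",
    "all_outliers = pd.concat(frames, ignore_index=True)\n",
    "print all_outliers.shape"
   ]
  }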
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}