annotate scripts/results_classification.py @ 32:e732f41ec019 branch-tests

author mpanteli <m.x.panteli@gmail.com>
date Wed, 13 Sep 2017 19:57:32 +0100
parents da5ba7926b82
children
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 10 15:10:32 2016

@author: mariapanteli
"""
import pickle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics

import map_and_average
import util_feature_learning


FILENAMES = map_and_average.OUTPUT_FILES


def load_data_from_pickle(filename):
    # load (list of feature matrices, country labels, audio labels) and
    # concatenate the feature matrices into a single feature matrix
    X_list, Y, Yaudio = pickle.load(open(filename, 'rb'))
    X = np.concatenate(X_list, axis=1)
    return X, Y, Yaudio


def get_train_test_indices(audiolabs):
    # indices of the full dataset (audio labels audiolabs, i.e. Yaudio)
    # that fall within the predefined train and test splits
    trainset, valset, testset = map_and_average.load_train_val_test_sets()
    trainaudiolabels, testaudiolabels = trainset[2], testset[2]
    # train, test indices
    aa_train = np.unique(trainaudiolabels)
    aa_test = np.unique(testaudiolabels)
    traininds = np.array([i for i, item in enumerate(audiolabs) if item in aa_train])
    testinds = np.array([i for i, item in enumerate(audiolabs) if item in aa_test])
    return traininds, testinds


def get_train_test_sets(X, Y, traininds, testinds):
    # slice features and labels into train and test sets
    X_train = X[traininds, :]
    Y_train = Y[traininds]
    X_test = X[testinds, :]
    Y_test = Y[testinds]
    return X_train, Y_train, X_test, Y_test


def classify_for_filenames(file_list=FILENAMES):
    # classify each dataset (one per feature learning method) and
    # concatenate the per-classifier results
    df_results = pd.DataFrame()
    feat_learner = util_feature_learning.Transformer()
    for filename in file_list:
        X, Y, Yaudio = load_data_from_pickle(filename)
        traininds, testinds = get_train_test_indices(Yaudio)
        X_train, Y_train, X_test, Y_test = get_train_test_sets(X, Y, traininds, testinds)
        df_result = feat_learner.classify(X_train, Y_train, X_test, Y_test)
        df_results = pd.concat([df_results, df_result], axis=0, ignore_index=True)
    return df_results


def plot_CF(CF, labels=None, figurename=None):
    # plot the confusion matrix with country labels on both axes
    labels[labels == 'United States of America'] = 'United States Amer.'
    plt.imshow(CF, cmap="Greys")
    plt.xticks(range(len(labels)), labels, rotation='vertical', fontsize=4)
    plt.yticks(range(len(labels)), labels, fontsize=4)
    if figurename is not None:
        plt.savefig(figurename, bbox_inches='tight')


def confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=False, plots=False):
    # classification_accuracy and modelLDA are assumed to be attributes of
    # util_feature_learning.Transformer, as with classify() above
    feat_learner = util_feature_learning.Transformer()
    accuracy, predictions = feat_learner.classification_accuracy(X_train, Y_train,
                                X_test, Y_test, model=feat_learner.modelLDA)
    labels = np.unique(Y_test)  # TODO: order countries by geographical proximity
    CF = metrics.confusion_matrix(Y_test, predictions, labels=labels)
    if saveCF:
        np.savetxt('data/CFlabels.csv', labels, fmt='%s')
        np.savetxt('data/CF.csv', CF, fmt='%10.5f')
    if plots:
        plot_CF(CF, labels=labels, figurename='data/conf_matrix.pdf')
    return accuracy, predictions


if __name__ == '__main__':
    df_results = classify_for_filenames(file_list=FILENAMES)
    # column 1 holds the accuracy; each file contributes 4 consecutive rows,
    # one per classifier, so integer division recovers the file index
    max_i = np.argmax(df_results.iloc[:, 1].values)
    feat_learning_i = max_i // 4  # 4 classifiers for each feature learning method
    filename = FILENAMES[feat_learning_i]
    X, Y, Yaudio = load_data_from_pickle(filename)
    traininds, testinds = get_train_test_indices(Yaudio)
    X_train, Y_train, X_test, Y_test = get_train_test_sets(X, Y, traininds, testinds)
    confusion_matrix(X_train, Y_train, X_test, Y_test, saveCF=True, plots=True)