annotate scripts/results.py @ 14:088b5547e094 branch-tests

Merge
author Maria Panteli <m.x.panteli@gmail.com>
date Tue, 12 Sep 2017 18:03:56 +0100
parents 98718fdd8326
children
rev   line source
Maria@4 1 # -*- coding: utf-8 -*-
Maria@4 2 """
Maria@4 3 Created on Tue Jul 12 20:49:48 2016
Maria@4 4
Maria@4 5 @author: mariapanteli
Maria@4 6 """
Maria@4 7
Maria@4 8 import numpy as np
Maria@4 9 import pandas as pd
Maria@4 10 import pickle
Maria@4 11 from collections import Counter
Maria@4 12 from sklearn.cluster import KMeans
Maria@4 13
Maria@4 14 import utils
Maria@4 15 import utils_spatial
Maria@4 16
Maria@4 17
def country_outlier_df(counts, labels, out_file=None, normalize=False):
    """Build a DataFrame of outlier counts per country.

    Parameters
    ----------
    counts : dict-like
        Mapping country -> number of outlier recordings.
    labels : array-like
        Country label for every recording in the dataset.
    out_file : str or None
        If given, the resulting frame is also written as CSV.
    normalize : bool
        If True, divide each count by the total number of
        recordings for that country.

    Returns
    -------
    pandas.DataFrame with columns 'Country' and 'Outliers'.
    """
    # make sure every country appears, even with zero outliers
    if len(counts.keys()) < len(np.unique(labels)):
        for label in np.unique(labels):
            if label not in counts:  # fix: dict.has_key is Python-2-only
                counts.update({label: 0})
    if normalize is True:
        counts = normalize_outlier_counts(counts, Counter(labels))
    df = pd.DataFrame.from_dict(counts, orient='index').reset_index()
    df.rename(columns={'index': 'Country', 0: 'Outliers'}, inplace=True)
    if out_file is not None:
        df.to_csv(out_file, index=False)
    return df
Maria@4 30
Maria@4 31
def normalize_outlier_counts(outlier_counts, country_counts):
    """Normalize a dictionary of outlier counts per country by
    the total number of recordings per country.

    Both dictionaries are expected to share the same keys.
    Returns a new dict; the input is no longer mutated in place
    (the original version silently altered the caller's dict).
    """
    return {key: float(outlier_counts[key]) / float(country_counts[key])
            for key in outlier_counts}
Maria@4 40
Maria@4 41
def get_outliers_df(X, Y, chi2thr=0.999, out_file=None):
    """Detect global outliers via Mahalanobis distance and summarise
    them per country.

    Returns the (normalized) per-country outlier DataFrame, the
    chi-square distance threshold, and the Mahalanobis distances.
    """
    threshold, y_pred, MD = utils.get_outliers_Mahal(X, chi2thr=chi2thr)
    # count outlier recordings per country, normalised by country size
    per_country = Counter(Y[y_pred])
    result = country_outlier_df(per_country, Y, normalize=True)
    if out_file is not None:
        result.to_csv(out_file, index=False)
    return result, threshold, MD
Maria@4 49
Maria@4 50
def print_most_least_outliers_topN(df, N=10):
    """Print the N countries with the most and the fewest outliers.

    Expects a DataFrame with 'Country' and 'Outliers' columns, as
    produced by country_outlier_df / get_outliers_df.
    """
    sort_inds = df['Outliers'].argsort()  # ascending order
    df_most = df[['Country', 'Outliers']].iloc[sort_inds[::-1][:N]]
    df_least = df[['Country', 'Outliers']].iloc[sort_inds[:N]]
    # fix: print statements were Python-2-only; call syntax works on 2 and 3
    print("most outliers ")
    print(df_most)
    print("least outliers ")
    print(df_least)
Maria@4 59
Maria@4 60
def load_metadata(Yaudio, metadata_file):
    """Load recording metadata and align its rows with Yaudio.

    Merges the CSV on the 'Audio' column so the returned frame
    follows the order of Yaudio.
    """
    metadata = pd.read_csv(metadata_file)
    order_frame = pd.DataFrame({'Audio': Yaudio})
    # inner merge keyed on 'Audio' keeps the row order of Yaudio
    aligned = pd.merge(order_frame, metadata, on='Audio', suffixes=['', '_r'])
    return aligned
Maria@4 66
Maria@4 67
Maria@4 68 def clusters_metadata(df, cl_pred, out_file=None):
Maria@4 69 def get_top_N_counts(labels, N=3):
Maria@4 70 ulab, ucount = np.unique(labels, return_counts=True)
Maria@4 71 inds = np.argsort(ucount)
Maria@4 72 return zip(ulab[inds[-N:]],ucount[inds[-N:]])
Maria@4 73 info = np.array([str(df['Country'].iloc[i]) for i in range(len(df))])
Maria@4 74 styles_description = []
Maria@4 75 uniq_cl = np.unique(cl_pred)
Maria@4 76 for ccl in uniq_cl:
Maria@4 77 inds = np.where(cl_pred==ccl)[0]
Maria@4 78 styles_description.append(get_top_N_counts(info[inds], N=3))
Maria@4 79 df_styles = pd.DataFrame(data=styles_description, index=uniq_cl)
Maria@4 80 print df_styles.to_latex()
Maria@4 81 if out_file is not None:
Maria@4 82 df_styles.to_csv(out_file, index=False)
Maria@4 83
Maria@4 84
if __name__ == '__main__':
    # load LDA-transformed frames; X_list holds one matrix per feature
    # family (rhythm, melody, mfcc, chroma), aligned with labels Y and
    # audio identifiers Yaudio
    with open('data/lda_data_8.pickle', 'rb') as f:  # fix: close the file
        X_list, Y, Yaudio = pickle.load(f)
    ddf = load_metadata(Yaudio, metadata_file='data/metadata.csv')
    w, data_countries = utils_spatial.get_neighbors_for_countries_in_dataset(Y)
    w_dict = utils_spatial.from_weights_to_dict(w, data_countries)
    X = np.concatenate(X_list, axis=1)

    # global outliers
    df_global, threshold, MD = get_outliers_df(X, Y, chi2thr=0.999)
    print_most_least_outliers_topN(df_global, N=10)

    # spatial (local) outliers with respect to each country's neighbors
    spatial_outliers = utils.get_local_outliers_from_neighbors_dict(
        X, Y, w_dict, chi2thr=0.999, do_pca=True)
    spatial_counts = Counter(dict([(ll[0], ll[1]) for ll in spatial_outliers]))
    df_local = country_outlier_df(spatial_counts, Y, normalize=True)
    print_most_least_outliers_topN(df_local, N=10)

    # outliers per individual feature family
    # fix: Xrhy/Xmel/Xmfc/Xchr were undefined names (NameError); the
    # per-feature matrices are exactly the elements of X_list
    feat = X_list
    feat_labels = ['rhy', 'mel', 'mfc', 'chr']
    for XX in feat:
        df_feat, threshold, MD = get_outliers_df(XX, Y, chi2thr=0.999)
        print_most_least_outliers_topN(df_feat, N=5)

    # how many styles are there
    # bestncl, ave_silh = utils.best_n_clusters_silhouette(X, min_ncl=5, max_ncl=50, metric="cosine")
    bestncl = 13  # pre-computed via the silhouette search above

    # get cluster predictions and metadata for each cluster
    cluster_model = KMeans(n_clusters=bestncl, random_state=50).fit(X)
    centroids = cluster_model.cluster_centers_
    cl_pred = cluster_model.predict(X)
    ddf['Clusters'] = cl_pred
    clusters_metadata(ddf, cl_pred)

    # how similar are the cultures and which ones seem to be global outliers
    cluster_freq = utils.get_cluster_freq_linear(X, Y, centroids)

    # Moran's I spatial autocorrelation on Mahalanobis distances
    data = cluster_freq.get_values()  # NOTE: deprecated in newer pandas; .values
    data_countries = cluster_freq.index
    # threshold, y_pred, MD = utils.get_outliers_Mahal(data, chi2thr=0.999)
    threshold, y_pred, MD = utils.get_outliers(data, chi2thr=0.999)
    y = np.sqrt(MD)
    utils_spatial.print_Moran_outliers(y, w, data_countries)
    utils_spatial.plot_Moran_scatterplot(y, w, data_countries)