Mercurial > hg > plosone_underreview
diff scripts/classification.py @ 47:081ff4ea7da7 branch-tests
sensitivity experiment split
author | Maria Panteli <m.x.panteli@gmail.com> |
---|---|
date | Fri, 15 Sep 2017 17:33:14 +0100 |
parents | ef829b187308 |
children | 08b9327f1935 |
line wrap: on
line diff
def classify_each_feature(X_train, Y_train, X_test, Y_test):
    """Classify with all features together, then with each feature group alone.

    Parameters
    ----------
    X_train, X_test : array-like, shape (n_samples, n_dim)
        Feature matrices; columns are partitioned into feature groups by
        ``map_and_average.get_feat_inds``.
    Y_train, Y_test : array-like
        Class labels for the corresponding rows of X_train / X_test.

    Returns
    -------
    pandas.DataFrame
        Classification results for the full feature set, with the result of
        each individual feature group concatenated alongside.
    """
    n_dim = X_train.shape[1]
    # feat_labels is unused here but returned by the helper alongside the
    # per-group column index lists.
    feat_labels, feat_inds = map_and_average.get_feat_inds(n_dim=n_dim)
    # FIX: the original body referenced feat_learner without defining it in
    # this scope (it was a local of the sibling training loop), which would
    # raise NameError; instantiate the transformer here as the sibling does.
    feat_learner = util_feature_learning.Transformer()
    # First, classification with all features together.
    df_results = feat_learner.classify(X_train, Y_train, X_test, Y_test)
    # Then append the result for each feature group separately.
    for inds in feat_inds:
        df_result = feat_learner.classify(X_train[:, inds], Y_train,
                                          X_test[:, inds], Y_test)
        # NOTE(review): the sibling loop concatenates with axis=0; axis=1
        # here lays per-feature results side by side — kept as written,
        # confirm this is the intended layout.
        df_results = pd.concat([df_results, df_result], axis=1,
                               ignore_index=True)
    return df_results