diff scripts/load_dataset.py @ 13:98718fdd8326 branch-tests

edits in the core functions
author Maria Panteli <m.x.panteli@gmail.com>
date Tue, 12 Sep 2017 18:03:47 +0100
parents e50c63cf96be
children 9847b954c217
--- a/scripts/load_dataset.py	Tue Sep 12 13:31:42 2017 +0100
+++ b/scripts/load_dataset.py	Tue Sep 12 18:03:47 2017 +0100
@@ -8,22 +8,86 @@
 import numpy as np
 import pandas as pd
 import pickle
+from sklearn.model_selection import train_test_split
 
 import load_features
-import util_dataset
 import util_filter_dataset
 
 
 #METADATA_FILE = 'sample_dataset/metadata.csv'
 #OUTPUT_FILES = ['sample_dataset/train_data.pickle', 'sample_dataset/val_data.pickle', 'sample_dataset/test_data.pickle']
-WIN_SIZE = 2
+WIN_SIZE = 8
 METADATA_FILE = 'data/metadata_BLSM_language_all.csv'
-#OUTPUT_FILES = ['/import/c4dm-04/mariap/train_data_cf.pickle', '/import/c4dm-04/mariap/val_data_cf.pickle', '/import/c4dm-04/mariap/test_data_cf.pickle']
-#OUTPUT_FILES = ['/import/c4dm-04/mariap/train_data_cf_4.pickle', '/import/c4dm-04/mariap/val_data_cf_4.pickle', '/import/c4dm-04/mariap/test_data_cf_4.pickle']
 OUTPUT_FILES = ['/import/c4dm-04/mariap/train_data_melodia_'+str(WIN_SIZE)+'.pickle', 
                 '/import/c4dm-04/mariap/val_data_melodia_'+str(WIN_SIZE)+'.pickle', 
                 '/import/c4dm-04/mariap/test_data_melodia_'+str(WIN_SIZE)+'.pickle']
 
+
+def get_train_val_test_idx(X, Y, seed=None):
+    """ Split in train, validation, test sets.
+    
+    Parameters
+    ----------
+    X : np.array
+        Data or indices.
+    Y : np.array
+        Class labels for data in X.
+    seed : int
+        Random seed.
+    
+    Returns
+    -------
+    (X_train, Y_train) : tuple
+        Data X and labels Y for the training set.
+    (X_val, Y_val) : tuple
+        Data X and labels Y for the validation set.
+    (X_test, Y_test) : tuple
+        Data X and labels Y for the test set.
+    
+    """
+    X_train, X_val_test, Y_train, Y_val_test = train_test_split(X, Y, train_size=0.6, random_state=seed, stratify=Y)
+    X_val, X_test, Y_val, Y_test = train_test_split(X_val_test, Y_val_test, train_size=0.5, random_state=seed, stratify=Y_val_test)
+    return (X_train, Y_train), (X_val, Y_val), (X_test, Y_test)
+
+
+def subset_labels(Y, N_min=10, N_max=100, seed=None):
+    """ Subset dataset to contain minimum N_min and maximum N_max instances 
+        per class. Return indices for this subset. 
+    
+    Parameters
+    ----------
+    Y : np.array
+        Class labels
+    N_min : int
+        Minimum instances per class
+    N_max : int
+        Maximum instances per class
+    seed : int
+        Random seed.
+    
+    Returns
+    -------
+    subset_idx : np.array
+        Indices of a subset whose classes each have between N_min and N_max instances
+    
+    """
+    np.random.seed(seed=seed)
+    subset_idx = []
+    labels = np.unique(Y)
+    for label in labels:
+        label_idx = np.where(Y==label)[0]
+        counts = len(label_idx)
+        if counts>=N_max:
+            subset_idx.append(np.random.choice(label_idx, N_max, replace=False))
+        elif counts>=N_min:
+            subset_idx.append(label_idx)
+        else:
+            # fewer than N_min samples for this class, skip it
+            continue
+    if len(subset_idx)>0:
+        subset_idx = np.concatenate(subset_idx, axis=0)
+    return subset_idx
+
+
 def extract_features(df, win2sec=8.0):
     """Extract features from melspec and chroma.
     
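The two helper functions added in the hunk above replace the ones previously imported from util_dataset. A minimal sketch of how they compose, assuming synthetic labels and that load_dataset is importable as a module (neither is part of this changeset):

import numpy as np
from load_dataset import get_train_val_test_idx, subset_labels

# synthetic labels: 'A' exceeds N_max, 'B' is in range, 'C' falls below N_min
Y = np.array(['A'] * 150 + ['B'] * 50 + ['C'] * 5)

subset_idx = subset_labels(Y, N_min=10, N_max=100, seed=0)
X, Y_sub = subset_idx, Y[subset_idx]   # 'A' downsampled to 100 items, 'C' dropped

# stratified 60/20/20 split over the surviving indices
(X_train, Y_train), (X_val, Y_val), (X_test, Y_test) = get_train_val_test_idx(X, Y_sub, seed=0)
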
@@ -56,12 +120,12 @@
     # load dataset
     df = pd.read_csv(METADATA_FILE)
     df = util_filter_dataset.remove_missing_data(df)
-    subset_idx = util_dataset.subset_labels(df['Country'].get_values())
+    subset_idx = subset_labels(df['Country'].get_values())
     df = df.iloc[subset_idx, :]
     X, Y = np.arange(len(df)), df['Country'].get_values()
     
     # split in train, val, test set
-    train_set, val_set, test_set = util_dataset.get_train_val_test_idx(X, Y) 
+    train_set, val_set, test_set = get_train_val_test_idx(X, Y) 
     
     # extract features and write output
     X_train, Y_train, Y_audio_train = extract_features(df.iloc[train_set[0], :], win2sec=WIN_SIZE)
@@ -76,6 +140,3 @@
     with open(OUTPUT_FILES[2], 'wb') as f:
         pickle.dump([X_test, Y_test, Y_audio_test], f)
 
-#out_file = '/import/c4dm-04/mariap/test_data_melodia_1_test.pickle'
-#    pickle.dump([X_test, Y_test, Y_audio_test], f)
-#with open(out_file, 'wb') as f:
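Each of the OUTPUT_FILES written above holds a [X, Y, Y_audio] list; reading one back is the inverse pickle call. A sketch, assuming the script has already been run and the paths above are reachable:

import pickle

from load_dataset import OUTPUT_FILES

with open(OUTPUT_FILES[0], 'rb') as f:
    X_train, Y_train, Y_audio_train = pickle.load(f)

print(len(X_train), len(Y_train), len(Y_audio_train))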