annotate Code/genre_classification/learning/preprocess_spectrograms_gtzan.py @ 47:b0186d4a4496 tip

Move 7Digital dataset to Downloads
author Paulo Chiliguano <p.e.chiliguano@se14.qmul.ac.uk>
date Sat, 09 Jul 2022 00:50:43 -0500
parents 68a62ca32441
children
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 21:55:58 2015

@author: paulochiliguano
"""


import tables
import numpy as np
import cPickle
import sklearn.preprocessing as preprocessing

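# Overview: this Python 2 script loads log-mel spectrogram excerpts from an
# HDF5 file, standardises them, encodes the GTZAN genre labels as integers,
# splits the data into train/validation/test sets and pickles the result
# (it relies on cPickle, file() and the old PyTables tables.openFile API).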
# Read HDF5 file that contains log-mel spectrograms
filename = '/homes/pchilguano/msc_project/dataset/gtzan/features/\
feats_3sec_9.h5'
with tables.openFile(filename, 'r') as f:
    features = f.root.x.read()
    #filenames = f.root.filenames.read()

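# `features` is expected to hold one fixed-size excerpt per example, e.g. an
# array of shape (n_examples, n_frames, n_mel_bands); the exact layout depends
# on the feature-extraction step that produced the HDF5 file. With that layout
# the block below views the data as (n_examples * n_frames, n_mel_bands) to
# fit the scaler on every frame, then reshapes it to
# (n_examples, n_frames * n_mel_bands) so each row is one flattened,
# standardised excerpt.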
# Pre-process spectrograms to zero mean and unit variance (per feature column)
#initial_shape = features.shape[1:]
n_per_example = np.prod(features.shape[1:-1])
number_of_features = features.shape[-1]
flat_data = features.view()
flat_data.shape = (-1, number_of_features)
scaler = preprocessing.StandardScaler().fit(flat_data)
flat_data = scaler.transform(flat_data)
flat_data.shape = (features.shape[0], -1)
#flat_targets = filenames.repeat(n_per_example)
#genre = np.asarray([line.strip().split('\t')[1] for line in open(filename, 'r').readlines()])

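# The ground-truth list is assumed to contain one tab-separated line per clip,
# e.g. "<clip path>\t<genre>"; only the genre field (index 1) is used here.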
# Read labels from ground truth
filename = '/homes/pchilguano/msc_project/dataset/gtzan/lists/ground_truth.txt'
with open(filename, 'r') as f:
    tag_set = set()
    for line in f:
        tag = line.strip().split('\t')[1]
        tag_set.add(tag)

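# Sorting the tags gives a stable label encoding; for the 10 GTZAN genres this
# would be e.g. {'blues': 0, 'classical': 1, ..., 'rock': 9}. The ground-truth
# file is assumed to list one entry per excerpt, in the same order as the rows
# of `flat_data`, so that `target[i]` labels `flat_data[i]`.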
# Map each genre label to a discrete integer
tag_dict = dict([(item, index) for index, item in enumerate(sorted(tag_set))])
with open(filename, 'r') as f:
    target = np.asarray([], dtype='int32')
    mp3_dict = {}
    for line in f:
        tag = line.strip().split('\t')[1]
        target = np.append(target, tag_dict[tag])

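# 50% / 25% / 25% split into train / validation / test: np.array_split cuts at
# the two boundary indices; under Python 2 the arithmetic below is integer
# division, so the boundaries are whole row indices.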
train_input, valid_input, test_input = np.array_split(
    flat_data,
    [flat_data.shape[0]*1/2,
     flat_data.shape[0]*3/4]
)
train_target, valid_target, test_target = np.array_split(
    target,
    [target.shape[0]*1/2,
     target.shape[0]*3/4]
)

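# The pickle stores a ((train_x, train_y), (valid_x, valid_y), (test_x, test_y))
# tuple. Illustrative load-back sketch (not part of this script):
#     with open('gtzan_3sec_9.pkl', 'rb') as pkl:
#         train_set, valid_set, test_set = cPickle.load(pkl)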
f = file('/homes/pchilguano/msc_project/dataset/gtzan/features/\
gtzan_3sec_9.pkl', 'wb')
cPickle.dump(
    (
        (train_input, train_target),
        (valid_input, valid_target),
        (test_input, test_target)
    ),
    f,
    protocol=cPickle.HIGHEST_PROTOCOL
)
f.close()

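# The quoted block below is an earlier per-frame variant (labels repeated for
# every frame, 80%/10%/10% split, different output path); it is kept for
# reference only and never executed.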
'''
flat_target = target.repeat(n_per_example)

train_input, valid_input, test_input = np.array_split(flat_data, [flat_data.shape[0]*4/5, flat_data.shape[0]*9/10])
train_target, valid_target, test_target = np.array_split(flat_target, [flat_target.shape[0]*4/5, flat_target.shape[0]*9/10])

f = file('/homes/pchilguano/deep_learning/gtzan_logistic.pkl', 'wb')
cPickle.dump(((train_input, train_target), (valid_input, valid_target), (test_input, test_target)), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
'''