# Code/genre_classification/classification/convolutional_mlp_7digital.py
# Revision 47:b0186d4a4496 (tip) -- "Move 7Digital dataset to Downloads"
# Author: Paulo Chiliguano <p.e.chiliguano@se14.qmul.ac.uk>
# Date:   Sat, 09 Jul 2022 00:50:43 -0500
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.


This implementation simplifies the model in the following ways:

 - LeNetConvPool doesn't implement location-specific gain and bias parameters
 - LeNetConvPool doesn't implement pooling by average; it implements pooling
   by max.
 - Digit classification is implemented with a logistic regression rather than
   an RBF network
 - LeNet5 did not use fully-connected convolutions at the second layer

References:
 - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
   Gradient-Based Learning Applied to Document
   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

"""
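# Project note: as configured below in genres_lenet5, this file adapts the
# tutorial model to 10-class music genre "soft" classification. The network
# takes 130x128 input patches, uses two conv-pool layers with (8, 1) filters
# and (4, 1) max-pooling, ReLU activations with dropout, a 500-unit hidden
# layer and a logistic regression output; pre-trained parameters are loaded
# from best_params.pkl and only the forward pass (p_y_given_x) is evaluated
# on the 7digital features.
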
import os
import sys
import timeit

import numpy

import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv

from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer

# Paulo: Additional libraries
import cPickle
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

# Paulo: Rectified Linear Unit
# Source: http://stackoverflow.com/questions/26497564/theano-hiddenlayer-activation-function
def relu(x):
    return T.maximum(0., x)

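# Illustrative sketch (not part of the model): relu() builds a symbolic
# expression, so it is evaluated through a compiled Theano function, e.g.
#
#   v = T.vector('v')
#   f = theano.function([v], relu(v))
#   f(numpy.asarray([-1., 0., 2.5], dtype=theano.config.floatX))
#   # -> array([ 0. ,  0. ,  2.5])
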
# Paulo: Random Streams
srng = RandomStreams()

class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights drawn uniformly from
        # [-W_bound, W_bound] (worked numbers for this file's configuration
        # are given after the class definition)
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # Paulo: dropout with inverted scaling; note there is no train/test
        # switch, so the random mask is also applied when the layer is used
        # for inference.
        # Source: https://github.com/Newmu/Theano-Tutorials/blob/master/5_convolutional_net.py
        retain_prob = 1 - 0.20
        pooled_out *= srng.binomial(
            pooled_out.shape,
            p=retain_prob,
            dtype=theano.config.floatX)
        pooled_out /= retain_prob

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        #self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input

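# Worked numbers (illustrative only, for the first conv-pool layer built later
# in this file: filter_shape=(32, 1, 8, 1), poolsize=(4, 1)):
#   fan_in  = 1 * 8 * 1            = 8
#   fan_out = 32 * 8 * 1 / (4 * 1) = 64
#   W_bound = sqrt(6 / (8 + 64))   = sqrt(6 / 72) ~= 0.289
# Dropout uses inverted scaling: with retain_prob = 0.8, each retained
# activation is divided by 0.8 so the expected value of the layer output is
# unchanged by the random mask.
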
'''
def evaluate_lenet5(learning_rate=0.01, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[32, 32], batch_size=10):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]

    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (28, 28) is the size of MNIST images.
    #layer0_input = x.reshape((batch_size, 1, 28, 28))
    layer0_input = x.reshape((batch_size, 1, 130, 128))
    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        #image_shape=(batch_size, 1, 28, 28),
        image_shape=(batch_size, 1, 130, 128),
        #filter_shape=(nkerns[0], 1, 5, 5),
        filter_shape=(nkerns[0], 1, 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        #image_shape=(batch_size, nkerns[0], 12, 12),
        image_shape=(batch_size, nkerns[0], 30, 128),
        #filter_shape=(nkerns[1], nkerns[0], 5, 5),
        filter_shape=(nkerns[1], nkerns[0], 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        #n_in=nkerns[1] * 4 * 4,
        n_in=nkerns[1] * 5 * 128,
        n_out=500,
        #n_out=100,
        #activation=T.tanh
        activation=relu
    )

    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
    #layer4 = LogisticRegression(input=layer3.output, n_in=50, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # Paulo: Set best param for MLP pre-training
    f = file('/homes/pchilguano/deep_learning/best_params.pkl', 'rb')
    params0, params1, params2, params3 = cPickle.load(f)
    f.close()
    layer0.W.set_value(params0[0])
    layer0.b.set_value(params0[1])
    layer1.W.set_value(params1[0])
    layer1.b.set_value(params1[1])
    layer2.W.set_value(params2[0])
    layer2.b.set_value(params2[1])
    layer3.W.set_value(params3[0])
    layer3.b.set_value(params3[1])

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params
    #params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 1000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
                    # Paulo: Get best parameters for MLP
                    best_params0 = [param.get_value().copy() for param in layer0.params]
                    best_params1 = [param.get_value().copy() for param in layer1.params]
                    best_params2 = [param.get_value().copy() for param in layer2.params]
                    best_params3 = [param.get_value().copy() for param in layer3.params]
                    #best_params4 = [param.get_value().copy() for param in layer4.params]

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # Paulo: Save best param for MLP
    f = file('/homes/pchilguano/deep_learning/best_params.pkl', 'wb')
    cPickle.dump((best_params0, best_params1, best_params2, best_params3), f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()
'''
def genres_lenet5(dataset, nkerns=[32, 32], batch_size=10):
    """
    :type dataset: string
    :param dataset: path to the pickled 7digital feature set used for genre
                    prediction

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    f = file(dataset, 'rb')
    data_x = cPickle.load(f)
    f.close()

    test_set_x = theano.shared(
        numpy.asarray(
            data_x,
            dtype=theano.config.floatX
        ),
        borrow=True
    )
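    # Note on the expected input (inferred from the reshape below): data_x
    # should be array-like of shape (n_clips, 130 * 128), i.e. 16640 values
    # per clip, and the later zip with audio_files.txt assumes the clips
    # appear in the same order as that list.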


    #datasets = load_data(dataset)

    #train_set_x, train_set_y = datasets[0]
    #valid_set_x, valid_set_y = datasets[1]
    #test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    #n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    #n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]

    #n_train_batches /= batch_size
    #n_valid_batches /= batch_size
    n_test_batches /= batch_size  # integer division: leftover clips (< batch_size) are ignored

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    #y = T.ivector('y')  # the labels are presented as 1D vector of
                         # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized feature patches of shape
    # (batch_size, 130 * 128) to a 4D tensor, compatible with our
    # LeNetConvPoolLayer; (130, 128) is the size of each input patch.
    layer0_input = x.reshape((batch_size, 1, 130, 128))
    # Construct the first convolutional pooling layer:
    # filtering reduces the input size to (130-8+1, 128-1+1) = (123, 128)
    # maxpooling reduces this further to (123/4, 128/1) = (30, 128)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 30, 128)
    # (see the shape walk-through after the second layer below)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 130, 128),
        filter_shape=(nkerns[0], 1, 8, 1),
        poolsize=(4, 1)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the input size to (30-8+1, 128-1+1) = (23, 128)
    # maxpooling reduces this further to (23/4, 128/1) = (5, 128)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 5, 128)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 30, 128),
        filter_shape=(nkerns[1], nkerns[0], 8, 1),
        poolsize=(4, 1)
    )

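    # Shape walk-through (illustrative, for the configuration above with
    # nkerns=[32, 32] and batch_size=10):
    #   input           : (10, 1, 130, 128)
    #   layer0 conv 8x1 : (10, 32, 123, 128)   # 130 - 8 + 1 = 123
    #   layer0 pool 4x1 : (10, 32, 30, 128)    # 123 // 4 = 30 (ignore_border)
    #   layer1 conv 8x1 : (10, 32, 23, 128)    # 30 - 8 + 1 = 23
    #   layer1 pool 4x1 : (10, 32, 5, 128)     # 23 // 4 = 5 (ignore_border)
    # Flattening layer1's output therefore yields nkerns[1] * 5 * 128 = 20480
    # inputs per example for the hidden layer below.
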
    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e. a matrix of rasterized patches).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 5 * 128),
    # or (10, 32 * 5 * 128) = (10, 20480) with the values used here.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected hidden layer (ReLU activation)
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 5 * 128,
        n_out=500,
        activation=relu
    )

    # classify the values of the fully-connected hidden layer into 10 genres
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    # the cost we minimize during training is the NLL of the model
    # cost = layer3.negative_log_likelihood(y)
    '''
    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    '''
    # Genre soft classification
    test_model = theano.function(
        [index],
        layer3.p_y_given_x,
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )
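    # Illustrative only: each call test_model(i) returns a numpy array of
    # shape (batch_size, 10); e.g. test_model(0)[0] is the 10-genre softmax
    # probability vector (each row sums to 1) for the first clip of the
    # first mini-batch.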

    # Paulo: Set best parameters
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/'
             'best_params.pkl', 'rb')
    params0, params1, params2, params3 = cPickle.load(f)
    f.close()
    layer0.W.set_value(params0[0])
    layer0.b.set_value(params0[1])
    layer1.W.set_value(params1[0])
    layer1.b.set_value(params1[1])
    layer2.W.set_value(params2[0])
    layer2.b.set_value(params2[1])
    layer3.W.set_value(params3[0])
    layer3.b.set_value(params3[1])

    # Probabilities
    print "Computing probabilities..."
    start_time = timeit.default_timer()
    genre_prob_batch = [test_model(i).tolist() for i in xrange(n_test_batches)]
    end_time = timeit.default_timer()
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # flatten the per-batch lists into one probability vector per clip
    genre_prob = [item for sublist in genre_prob_batch for item in sublist]

    filename = ('/homes/pchilguano/msc_project/dataset/7digital/lists/'
                'audio_files.txt')
    with open(filename, 'r') as f:
        songID = [line.strip().split('/')[-1][:-4] for line in f]

    # map each song ID (file name with its last four characters, the
    # extension, removed) to its genre-probability vector; zip() truncates to
    # the shorter list, so any clips dropped by the integer batch division
    # above are left out
    items = dict(zip(songID, genre_prob))
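    # Illustrative structure of the resulting dictionary (keys depend on
    # audio_files.txt; values shown schematically):
    #   items == {
    #       '<songID>': [p_genre_0, p_genre_1, ..., p_genre_9],
    #       ...
    #   }
    # i.e. one 10-element probability vector per clip, used below as the
    # song-level feature vector.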
    print "Saving song feature vectors in dictionary..."
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/'
             'genre_prob.pkl', 'wb')
    cPickle.dump(items, f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()

    '''
    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 1000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
                    # Paulo: Get best parameters for MLP
                    best_params0 = [param.get_value().copy() for param in layer0.params]
                    best_params1 = [param.get_value().copy() for param in layer1.params]
                    best_params2 = [param.get_value().copy() for param in layer2.params]
                    best_params3 = [param.get_value().copy() for param in layer3.params]

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    # Paulo: Save best param for MLP
    f = file('/homes/pchilguano/deep_learning/genre_prob.pkl', 'wb')
    cPickle.dump((best_params0, best_params1, best_params2, best_params3), f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()
    '''
if __name__ == '__main__':
    #evaluate_lenet5()
    genres_lenet5(
        dataset='/homes/pchilguano/msc_project/dataset/7digital/features/'
                'feats.pkl'
    )

#def experiment(state, channel):
#    evaluate_lenet5(state.learning_rate, dataset=state.dataset)