p@24: """This tutorial introduces the LeNet5 neural network architecture p@24: using Theano. LeNet5 is a convolutional neural network, good for p@24: classifying images. This tutorial shows how to build the architecture, p@24: and comes with all the hyper-parameters you need to reproduce the p@24: paper's MNIST results. p@24: p@24: p@24: This implementation simplifies the model in the following ways: p@24: p@24: - LeNetConvPool doesn't implement location-specific gain and bias parameters p@24: - LeNetConvPool doesn't implement pooling by average, it implements pooling p@24: by max. p@24: - Digit classification is implemented with a logistic regression rather than p@24: an RBF network p@24: - LeNet5 was not fully-connected convolutions at second layer p@24: p@24: References: p@24: - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: p@24: Gradient-Based Learning Applied to Document p@24: Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998. p@24: http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf p@24: p@24: """ p@24: import os p@24: import sys p@24: import timeit p@24: p@24: import numpy p@24: p@24: import theano p@24: import theano.tensor as T p@24: from theano.tensor.signal import downsample p@24: from theano.tensor.nnet import conv p@24: p@24: from logistic_sgd import LogisticRegression, load_data p@24: from mlp import HiddenLayer p@24: p@24: # Paulo Chiliguano: Additional libraries p@24: import cPickle p@24: from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams p@24: p@24: # Paulo Chiliguano: Rectifier Linear Unit p@24: # Source: http://stackoverflow.com/questions/26497564/theano-hiddenlayer-activation-function p@24: def relu(x): p@24: return T.maximum(0.,x) p@24: p@24: # Paulo: Random Streams p@24: srng = RandomStreams(seed=234) p@24: p@24: class LeNetConvPoolLayer(object): p@24: """Pool Layer of a convolutional network """ p@24: p@24: def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)): p@24: """ p@24: Allocate a LeNetConvPoolLayer with shared variable internal parameters. p@24: p@24: :type rng: numpy.random.RandomState p@24: :param rng: a random number generator used to initialize weights p@24: p@24: :type input: theano.tensor.dtensor4 p@24: :param input: symbolic image tensor, of shape image_shape p@24: p@24: :type filter_shape: tuple or list of length 4 p@24: :param filter_shape: (number of filters, num input feature maps, p@24: filter height, filter width) p@24: p@24: :type image_shape: tuple or list of length 4 p@24: :param image_shape: (batch size, num input feature maps, p@24: image height, image width) p@24: p@24: :type poolsize: tuple or list of length 2 p@24: :param poolsize: the downsampling (pooling) factor (#rows, #cols) p@24: """ p@24: p@24: assert image_shape[1] == filter_shape[1] p@24: self.input = input p@24: p@24: # there are "num input feature maps * filter height * filter width" p@24: # inputs to each hidden unit p@24: fan_in = numpy.prod(filter_shape[1:]) p@24: # each unit in the lower layer receives a gradient from: p@24: # "num output feature maps * filter height * filter width" / p@24: # pooling size p@24: fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) / p@24: numpy.prod(poolsize)) p@24: # initialize weights with random weights p@24: W_bound = numpy.sqrt(6. 
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # Paulo: dropout
        # Source: https://github.com/Newmu/Theano-Tutorials/blob/master/5_convolutional_net.py
        # Note: the same symbolic graph is reused for the validation/test
        # functions below, so this random mask is also applied at evaluation
        # time; dividing by retain_prob keeps the expected activation
        # unchanged.
        retain_prob = 1 - 0.20
        pooled_out *= srng.binomial(
            pooled_out.shape,
            p=retain_prob,
            dtype=theano.config.floatX)
        pooled_out /= retain_prob

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        #self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input


def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]

    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 130 * 128)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (130, 128) is the size of the inputs used here; the original tutorial
    # used 28x28 MNIST images.
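    # Each row of x is expected to hold one flattened 130x128 example
    # (130 * 128 = 16640 values). The givens below always slice full
    # minibatches of batch_size rows, so any examples beyond a whole
    # number of minibatches are simply never used.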
    #layer0_input = x.reshape((batch_size, 1, 28, 28))
    layer0_input = x.reshape((batch_size, 1, 130, 128))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (130-8+1, 128-1+1) = (123, 128)
    # maxpooling with (4, 1) reduces this further to (123//4, 128) = (30, 128)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 30, 128)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        #image_shape=(batch_size, 1, 28, 28),
        image_shape=(batch_size, 1, 130, 128),
        #filter_shape=(nkerns[0], 1, 5, 5),
        filter_shape=(nkerns[0], 1, 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (30-8+1, 128-1+1) = (23, 128)
    # maxpooling with (4, 1) reduces this further to (23//4, 128) = (5, 128)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 5, 128)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        #image_shape=(batch_size, nkerns[0], 12, 12),
        image_shape=(batch_size, nkerns[0], 30, 128),
        #filter_shape=(nkerns[1], nkerns[0], 5, 5),
        filter_shape=(nkerns[1], nkerns[0], 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 5 * 128),
    # or (10, 32 * 5 * 128) = (10, 20480) with the values used in __main__.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected layer with ReLU activation
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        #n_in=nkerns[1] * 4 * 4,
        n_in=nkerns[1] * 5 * 128,
        n_out=500,
        #n_out=100,
        #activation=T.tanh
        activation=relu
    )

    # classify the values of the fully-connected layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
    #layer4 = LogisticRegression(input=layer3.output, n_in=50, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # Paulo: Set best param for MLP pre-training
    # (This warm-starts the network from previously saved parameters; the
    # file must already exist, e.g. from an earlier run of this script.)
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/\
best_params.pkl', 'rb')
    params0, params1, params2, params3 = cPickle.load(f)
    f.close()
    layer0.W.set_value(params0[0])
    layer0.b.set_value(params0[1])
    layer1.W.set_value(params1[0])
    layer1.b.set_value(params1[1])
    layer2.W.set_value(params2[0])
    layer2.b.set_value(params2[1])
    layer3.W.set_value(params3[0])
    layer3.b.set_value(params3[1])

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params
    #params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params
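    # The gradient updates below implement plain SGD, i.e. for every shared
    # variable p in params:  p <- p - learning_rate * T.grad(cost, p).
    # With the four layers above, params holds 8 shared variables
    # (one weight tensor and one bias vector per layer).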

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 1000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
                    # Paulo: Get best parameters for MLP
                    best_params0 = [param.get_value().copy() for param in layer0.params]
                    best_params1 = [param.get_value().copy() for param in layer1.params]
                    best_params2 = [param.get_value().copy() for param in layer2.params]
                    best_params3 = [param.get_value().copy() for param in layer3.params]
                    #best_params4 = [param.get_value().copy() for param in layer4.params]
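                    # These snapshots are taken only when the validation
                    # error improves, so the values pickled to
                    # best_params.pkl at the end of training correspond to
                    # the best validation model, not to the final iteration.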

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # Paulo: Save best param for MLP
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/\
best_params.pkl', 'wb')
    cPickle.dump(
        (best_params0, best_params1, best_params2, best_params3),
        f,
        protocol=cPickle.HIGHEST_PROTOCOL
    )
    f.close()

if __name__ == '__main__':
    evaluate_lenet5(
        learning_rate=0.01,
        n_epochs=200,
        dataset='/homes/pchilguano/msc_project/dataset/gtzan/features/\
gtzan_3sec_2.pkl',
        nkerns=[32, 32],
        batch_size=10
    )

def experiment(state, channel):
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)