"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.


This implementation simplifies the model in the following ways:

 - LeNetConvPool doesn't implement location-specific gain and bias parameters
 - LeNetConvPool doesn't implement pooling by average; it implements pooling
   by max.
 - Digit classification is implemented with a logistic regression rather than
   an RBF network
 - LeNet5 did not use fully-connected convolutions at the second layer

References:
 - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
   Gradient-Based Learning Applied to Document
   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

"""
import os
import sys
import timeit

import numpy

import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv

from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer

# Paulo Chiliguano: Additional libraries
import cPickle
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

# Paulo Chiliguano: Rectified Linear Unit (ReLU)
# Source: http://stackoverflow.com/questions/26497564/theano-hiddenlayer-activation-function
def relu(x):
    return T.maximum(0., x)

# Paulo: Random Streams
srng = RandomStreams(seed=234)
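# Note: this shared MRG random stream provides the binomial masks used for
# dropout inside LeNetConvPoolLayer below; the fixed seed makes the sequence
# of dropout masks reproducible from run to run.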

class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # Paulo: dropout
        # Source: https://github.com/Newmu/Theano-Tutorials/blob/master/5_convolutional_net.py
        retain_prob = 1 - 0.20
        pooled_out *= srng.binomial(
            pooled_out.shape,
            p=retain_prob,
            dtype=theano.config.floatX)
        pooled_out /= retain_prob
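        # Note: this is "inverted" dropout -- each pooled activation is kept
        # with probability retain_prob (0.8) and the survivors are rescaled by
        # 1 / retain_prob, so the expected value of pooled_out is unchanged.
        # The mask is resampled from srng every time the graph is evaluated,
        # so it is also applied to validation and test minibatches here.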

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        #self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input


def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training / testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]

    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size
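    # Note: under Python 2 these are integer divisions, so any trailing
    # examples that do not fill a complete minibatch of batch_size are ignored.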

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized inputs of shape (batch_size, 130 * 128)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer.
    # (The original MNIST tutorial used (28, 28) images.)
    #layer0_input = x.reshape((batch_size, 1, 28, 28))
    layer0_input = x.reshape((batch_size, 1, 130, 128))
    # Construct the first convolutional pooling layer.
    # (In the original MNIST setup, filtering reduced the image size to
    # (28-5+1, 28-5+1) = (24, 24) and maxpooling reduced this further to
    # (24/2, 24/2) = (12, 12), giving a 4D output tensor of shape
    # (batch_size, nkerns[0], 12, 12).)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        #image_shape=(batch_size, 1, 28, 28),
        image_shape=(batch_size, 1, 130, 128),
        #filter_shape=(nkerns[0], 1, 5, 5),
        filter_shape=(nkerns[0], 1, 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )
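    # Shape bookkeeping for the configuration used here (130x128 input,
    # (8, 1) filters, (4, 1) max-pooling with ignore_border=True):
    #   convolution: (130 - 8 + 1, 128 - 1 + 1) = (123, 128)
    #   max-pooling: (123 // 4, 128 // 1) = (30, 128)
    # so layer0.output has shape (batch_size, nkerns[0], 30, 128), matching
    # the image_shape declared for layer1 below.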

    # Construct the second convolutional pooling layer.
    # (In the original MNIST setup, filtering reduced the image size to
    # (12-5+1, 12-5+1) = (8, 8) and maxpooling reduced this further to
    # (8/2, 8/2) = (4, 4), giving a 4D output tensor of shape
    # (batch_size, nkerns[1], 4, 4).)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        #image_shape=(batch_size, nkerns[0], 12, 12),
        image_shape=(batch_size, nkerns[0], 30, 128),
        #filter_shape=(nkerns[1], nkerns[0], 5, 5),
        filter_shape=(nkerns[1], nkerns[0], 8, 1),
        #poolsize=(2, 2)
        poolsize=(4, 1)
    )
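    # Shape bookkeeping for layer1 with this configuration:
    #   convolution: (30 - 8 + 1, 128 - 1 + 1) = (23, 128)
    #   max-pooling: (23 // 4, 128 // 1) = (5, 128)
    # so layer1.output has shape (batch_size, nkerns[1], 5, 128), which is why
    # the hidden layer below uses n_in = nkerns[1] * 5 * 128.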

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e. matrix of rasterized images).
    # Here this generates a matrix of shape (batch_size, nkerns[1] * 5 * 128);
    # in the original MNIST setup it was (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected layer (ReLU activation here; the original
    # tutorial used a sigmoidal/tanh layer)
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        #n_in=nkerns[1] * 4 * 4,
        n_in=nkerns[1] * 5 * 128,
        n_out=500,
        #n_out=100,
        #activation=T.tanh
        activation=relu
    )

    # classify the values of the fully-connected layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
    #layer4 = LogisticRegression(input=layer3.output, n_in=50, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # Paulo: Initialise the layers with the best parameters from pre-training
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/\
best_params.pkl', 'rb')
    params0, params1, params2, params3 = cPickle.load(f)
    f.close()
    layer0.W.set_value(params0[0])
    layer0.b.set_value(params0[1])
    layer1.W.set_value(params1[0])
    layer1.b.set_value(params1[1])
    layer2.W.set_value(params2[0])
    layer2.b.set_value(params2[1])
    layer3.W.set_value(params3[0])
    layer3.b.set_value(params3[1])
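    # Note: best_params.pkl is expected to hold a 4-tuple with one [W, b] pair
    # per layer, in the order layer0..layer3 -- the same format this script
    # writes out at the end of training.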

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params
    #params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 1000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
                    # Paulo: Get best parameters for MLP
                    best_params0 = [param.get_value().copy() for param in layer0.params]
                    best_params1 = [param.get_value().copy() for param in layer1.params]
                    best_params2 = [param.get_value().copy() for param in layer2.params]
                    best_params3 = [param.get_value().copy() for param in layer3.params]
                    #best_params4 = [param.get_value().copy() for param in layer4.params]

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # Paulo: Save best params for MLP
    f = file('/homes/pchilguano/msc_project/dataset/genre_classification/\
best_params.pkl', 'wb')
    cPickle.dump(
        (best_params0, best_params1, best_params2, best_params3),
        f,
        protocol=cPickle.HIGHEST_PROTOCOL
    )
    f.close()

if __name__ == '__main__':
    evaluate_lenet5(
        learning_rate=0.01,
        n_epochs=200,
        dataset='/homes/pchilguano/msc_project/dataset/gtzan/features/\
gtzan_3sec_2.pkl',
        nkerns=[32, 32],
        batch_size=10
    )

def experiment(state, channel):
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)