annotate layers_custom.py @ 1:04f1e3463466 tip master

Implement maxpooling and unpooling aspect
author Dan Stowell <danstowell@users.sourceforge.net>
date Wed, 13 Jan 2016 09:56:16 +0000
parents 73317239d6d1
children

import numpy as np

import theano
import theano.tensor as T

from lasagne.layers.base import Layer

###############################################################################################################

class NormalisationLayer(Layer):
    """
    This layer applies a simple mean-and-std normalisation to input data.
    It lets you "learn" the mean and std from training data and then apply the same transform "live" to any future incoming data.

    NOTE: the parameters are NOT learnt during training; they must be initialised BEFORE training by calling set_normalisation().
    """
    def __init__(self, incoming, numbins, **kwargs):
        "numbins is the number of frequency bins in the spectrograms we're going to be normalising"
        super(NormalisationLayer, self).__init__(incoming, **kwargs)
        self.numbins = numbins
        self._output_shape = None
        self.initialised = False
        # Normalisation is applied per frequency bin: we subtract ("sub") the mean, then multiply ("mul") by 1/std.
        # We store the reciprocal and multiply rather than divide, since multiplication is often more efficient.
        self.normn_sub = theano.shared(np.zeros((1, 1, numbins, 1), dtype=theano.config.floatX), borrow=True, name='norm_sub', broadcastable=(True, True, False, True))
        self.normn_mul = theano.shared(np.ones( (1, 1, numbins, 1), dtype=theano.config.floatX), borrow=True, name='norm_mul', broadcastable=(True, True, False, True))
        # We also compile a standalone theano function, so data can be normalised "manually" outside the network if needed
        inputdata = T.tensor4('inputdata')
        self.transform_some_data = theano.function([inputdata], (inputdata - self.normn_sub) * self.normn_mul)

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_output_for(self, inputdata, **kwargs):
        #if not self.initialised:
        #    print("NormalisationLayer must be initialised with normalisation parameters before training")
        return (inputdata - self.normn_sub) * self.normn_mul

    def set_normalisation(self, databatches):
        "Estimates the per-bin normalisation parameters (mean and 1/std) from a collection of data batches."
        numbins = self.numbins
        # We first collapse the data batches, essentially into one very long spectrogram of shape (numbins, total_numframes)
        data = np.concatenate(np.vstack(np.vstack(databatches)), axis=-1)

        centre = np.mean(data, axis=1)
        self.normn_sub.set_value(centre.astype(theano.config.floatX).reshape((1,1,numbins,1)), borrow=True)
        self.normn_mul.set_value((1. / data.std(axis=1)).astype(theano.config.floatX).reshape((1,1,-1,1)), borrow=True)

        self.initialised = True

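# ------------------------------------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the layer code): roughly how the layer above is meant to be
# wired up and initialised before training. The sizes and the random "databatches" below are hypothetical
# assumptions, standing in for real spectrogram batches of shape (nbatches, batchsize, 1, numbins, numframes).

if __name__ == '__main__':
    import lasagne

    numbins, numframes, batchsize = 128, 64, 16
    network = lasagne.layers.InputLayer((None, 1, numbins, numframes))
    network = normlayer = NormalisationLayer(network, numbins)

    # Fake training data, standing in for real spectrogram batches
    databatches = np.random.rand(10, batchsize, 1, numbins, numframes).astype(theano.config.floatX)

    # Must be called BEFORE training: estimates the per-bin mean and 1/std and freezes them
    normlayer.set_normalisation(databatches)

    # The compiled helper normalises data outside the network graph, e.g. for inspection
    normed = normlayer.transform_some_data(databatches[0])
    print("normalised batch shape: %s" % str(normed.shape))
    print("overall mean after normalisation (should be ~0): %g" % normed.mean())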