changeset 45:6e9154fc58df

moving the code files to the synpy package directory
author christopherh <christopher.harte@eecs.qmul.ac.uk>
date Thu, 23 Apr 2015 23:52:04 +0100
parents 144460f34b5e
children 34fec1bc4770
files Syncopation models/synpy/KTH.py Syncopation models/synpy/LHL.py Syncopation models/synpy/PRS.py Syncopation models/synpy/SG.py Syncopation models/synpy/TMC.py Syncopation models/synpy/TOB.py Syncopation models/synpy/WNBD.py Syncopation models/synpy/__init__.py Syncopation models/synpy/basic_functions.py Syncopation models/synpy/midiparser.py Syncopation models/synpy/music_objects.py Syncopation models/synpy/parameter_setter.py Syncopation models/synpy/readmidi.py Syncopation models/synpy/rhythm_parser.py Syncopation models/synpy/syncopation.py
diffstat 15 files changed, 2294 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/KTH.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,122 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+
+from basic_functions import get_note_indices, repeat, velocity_sequence_to_min_timespan
+
+# To find the nearest power of 2 equal to or less than the given number
+def round_down_power_2(number):
+	i = 0
+	if number > 0:
+		while pow(2,i) > number or number >= pow(2,i+1):
+			i = i+1
+		power2 = pow(2,i)
+	else:
+		print 'Error: a number less than 1 cannot be rounded down to its nearest power of two.'
+		power2 = None
+	return power2
+
+# To find the nearest power of 2 equal to or more than the given number
+def round_up_power_2(number):
+	i = 0
+	while pow(2,i) < number:
+		i = i + 1
+	return pow(2,i)
+
+# To examine whether start_time is 'off-beat'
+def start_time_offbeat_measure(startTime, c_n):
+	measure = 0
+	if startTime % c_n != 0:
+		measure = 2
+	return measure
+
+# To examine whether end_time is 'off-beat'
+def end_time_offbeat_measure(endTime, c_n):
+	measure = 0
+	if endTime % c_n != 0:
+		measure = 1
+	return measure
+
+def get_syncopation(bar, parameters = None):
+	syncopation = None
+
+	# KTH only deals with simple-duple meter where the number of beats per bar is a power of two.
+	numerator = bar.get_time_signature().get_numerator()
+	if numerator != round_down_power_2(numerator):
+		print 'Warning: KTH model detects non simple-duple meter so returning None.'
+	else:
+		# retrieve note-sequence and next bar's note-sequence
+		noteSequence = bar.get_note_sequence()
+		#for note in noteSequence:
+		#	print note.to_string()
+		#print 'barlength',bar.get_bar_ticks()
+
+		nextbarNoteSequence = None
+		if bar.get_next_bar() != None:
+			nextbarNoteSequence = bar.get_next_bar().get_note_sequence()
+
+		# convert note sequence to its minimum time-span representation so that the later calculation can be faster
+		# noteSequence = note_sequence_to_min_timespan(noteSequence)
+		# find delta_t 
+		Tmin = len(velocity_sequence_to_min_timespan(bar.get_velocity_sequence()))
+		#print 'Tmin',Tmin
+		T = round_up_power_2(Tmin)
+		#print 'T',T
+		deltaT = float(bar.get_bar_ticks())/T
+		#print 'delta',deltaT
+
+
+		# calculate syncopation note by note
+		syncopation = 0
+
+		for note in noteSequence:
+			c_n = round_down_power_2(note.duration/deltaT)
+			#print 'd', note.duration
+			#print 'c_n', c_n
+			endTime = note.startTime + note.duration
+			#print float(note.startTime)/deltaT, float(endTime)/deltaT
+			syncopation = syncopation + start_time_offbeat_measure(float(note.startTime)/deltaT,c_n) + end_time_offbeat_measure(float(endTime)/deltaT,c_n)
+
+
+	return syncopation
+
+# # To calculate syncopation value of the sequence in the given time-signature.
+# def get_syncopation(seq, timesig, postbar_seq):
+# 	syncopation = 0
+
+# 	numerator = int(timesig.split("/")[0])
+# 	if numerator == round_down_power_2(numerator):	# if is a binary time-signature
+# 		# converting to minimum time-span format
+# 		seq = get_min_timeSpan(seq)	
+# 		if postbar_seq != None:
+# 			postbar_seq = get_min_timeSpan(postbar_seq)
+
+# 		# sf is a stretching factor matching rhythm sequence and meter, as Keith defines the note duration as a multiple of 1/(2^d) beats where d is number of metrical level
+# 		sf = round_up_power_2(len(seq))
+		
+# 		# retrieve all the indices of all the notes in this sequence
+# 		note_indices = get_note_indices(seq)
+
+# 		for i in range(len(note_indices)):
+# 			# Assuming start_time is the index of this note, end_time is the index of the following note
+# 			start_time = note_indices[i]*sf/float(len(seq))
+
+# 			if i == len(note_indices)-1:	# if this is the last note, end_time is the index of the following note in the next bar
+# 				if postbar_seq != None and postbar_seq != repeat([0],len(postbar_seq)):
+# 					next_index = get_note_indices(postbar_seq)[0]+len(seq)
+# 					end_time = next_index*sf/float(len(seq))
+# 				else:	# or if the next bar is none or full rest, end_time is the end of this sequence.
+# 					end_time = sf
+# 			else:
+# 				end_time = note_indices[i+1]*sf/float(len(seq))
+
+# 			duration = end_time - start_time
+# 			c_n = round_down_power_2(duration)
+# 			syncopation = syncopation + start(start_time,c_n) + end(end_time,c_n)
+# 	else: 
+# 		print 'Error: KTH model can only deal with binary time-signature, e.g. 2/4 and 4/4. '
+# 		syncopation = None
+
+# 	return syncopation
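
The per-note scoring above (2 for an off-beat onset, 1 for an off-beat ending, with the beat size c_n rounded down to a power of two) can be illustrated with a minimal stand-alone sketch. This is not the package API; the rhythm, written as hypothetical (start, duration) pairs in abstract time units, is made up for illustration.

def round_down_pow2(n):
    # largest power of two that is <= n (assumes n >= 1)
    p = 1
    while p * 2 <= n:
        p *= 2
    return p

def keith_measure(notes):
    total = 0
    for start, duration in notes:
        c = round_down_pow2(duration)
        if start % c != 0:               # off-beat onset scores 2
            total += 2
        if (start + duration) % c != 0:  # off-beat ending scores 1
            total += 1
    return total

# hypothetical bar of 8 units: a note on the downbeat, then an anticipated note
print(keith_measure([(0, 2), (3, 5)]))  # -> 2
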
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/LHL.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,98 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+'''
+
+from basic_functions import concatenate, repeat, subdivide, ceiling, get_rhythm_category
+from parameter_setter import are_parameters_valid
+
+terminalNodes = []		# Global variable, storing all the terminal nodes from recursive tree structure in time order
+
+
+
+# Each terminal node contains two properties: its node type (note or rest) and its metrical weight.
+class Node:
+	def __init__(self,nodeType,metricalWeight):
+		self.nodeType = nodeType
+		self.metricalWeight = metricalWeight
+
+# This function recursively builds the tree for a binary sequence and returns a sequence containing the terminal nodes in time order.
+def recursive_tree(binarySequence, subdivisionSequence, weightSequence, metricalWeight, level):
+	# If matching to a Note type, add to terminal nodes
+	if binarySequence == concatenate([1],repeat([0],len(binarySequence)-1)):	
+		terminalNodes.append(Node('N',metricalWeight))
+
+	# If matching to a Rest type, add to terminal nodes
+	elif binarySequence == repeat([0],len(binarySequence)):					
+		terminalNodes.append(Node('R',metricalWeight))
+
+	# Keep subdividing by the subdivisor of the next level
+	else:													
+		subBinarySequences = subdivide(binarySequence, subdivisionSequence[level+1])	
+		subWeightSequences = concatenate([metricalWeight],repeat([weightSequence[level+1]],subdivisionSequence[level+1]-1))
+		for a in range(len(subBinarySequences)):
+			recursive_tree(subBinarySequences[a], subdivisionSequence, weightSequence, subWeightSequences[a], level+1)
+
+
+def get_syncopation(bar, parameters = None):
+	del terminalNodes[:]
+	syncopation = None
+
+	binarySequence = bar.get_binary_sequence()
+	subdivisionSequence = bar.get_subdivision_sequence()
+
+	# LHL can only measure monorhythms
+	if get_rhythm_category(binarySequence, subdivisionSequence) == 'poly':
+		print 'Warning: LHL model detects polyrhythms so returning None.'
+	else:
+		# set defaults
+		Lmax = 5
+		weightSequence = range(0,-Lmax-1,-1)
+		# if parameters are specified by the user, check their validity and update the defaults if valid
+		if parameters!= None:
+			if 'Lmax' in parameters:
+				Lmax = parameters['Lmax']				
+			if 'W' in parameters:
+				weightSequence = parameters['W']
+
+		if not are_parameters_valid(Lmax, weightSequence, subdivisionSequence):
+			print 'Error: the given parameters are not valid.'
+		else:
+			# If there is a rhythm in the previous bar, process its tree structure
+			prevbar =  bar.get_previous_bar()
+			if prevbar != None and not prevbar.is_empty():
+				prebarBinarySequence = prevbar.get_binary_sequence()
+				recursive_tree(ceiling(prebarBinarySequence), subdivisionSequence, weightSequence, weightSequence[0],0)
+				
+				if len(terminalNodes)>0:
+					# Only keep the last note-type node
+					while terminalNodes[-1].nodeType != 'N':
+						del terminalNodes[-1]
+					del terminalNodes[0:-1]
+
+			# For the rhythm in the current bar, process its tree structure and store the terminal nodes 
+			recursive_tree(ceiling(binarySequence), subdivisionSequence, weightSequence, weightSequence[0],0)
+			
+			# for t in terminalNodes:
+			# 	print '<', t.nodeType, t.metricalWeight, '>'
+
+			# Search for the NR pairs that contribute to syncopation, then add the weight difference to the NRpairSyncopation list
+			NRpairSyncopation = []
+			for i in range(len(terminalNodes)-1,0,-1):
+				if terminalNodes[i].nodeType == 'R':
+					for j in range(i-1, -1, -1):
+						if (terminalNodes[j].nodeType == 'N') & (terminalNodes[i].metricalWeight >= terminalNodes[j].metricalWeight):
+							NRpairSyncopation.append(terminalNodes[i].metricalWeight - terminalNodes[j].metricalWeight)
+							break
+			#print NRpairSyncopation
+
+			# If there is any syncopation, sum all the local syncopation values stored in the NRpairSyncopation list
+			if len(NRpairSyncopation) != 0:
+				syncopation = sum(NRpairSyncopation)
+			# If no syncopation, the value is -1; 	
+			elif len(terminalNodes) != 0:
+				syncopation = -1
+
+	return syncopation
+
+
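
A minimal stand-alone sketch of the note/rest pairing step above, assuming the terminal nodes have already been flattened into hypothetical (type, weight) pairs in time order (weights follow the default 0, -1, -2, ... convention); each rest falling on a position at least as strong as an earlier note contributes the weight difference.

def lhl_from_nodes(nodes):
    # nodes: list of ('N' or 'R', metrical weight) in time order
    pairs = []
    for i in range(len(nodes) - 1, 0, -1):
        if nodes[i][0] == 'R':
            for j in range(i - 1, -1, -1):
                if nodes[j][0] == 'N' and nodes[i][1] >= nodes[j][1]:
                    pairs.append(nodes[i][1] - nodes[j][1])
                    break
    if pairs:
        return sum(pairs)
    return -1 if nodes else None

# hypothetical bar: notes on weak positions followed by rests on stronger ones
print(lhl_from_nodes([('N', 0), ('N', -2), ('R', -1), ('R', 0)]))  # -> 3
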
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/PRS.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,73 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+'''
+
+from basic_functions import repeat, subdivide, ceiling, velocity_sequence_to_min_timespan, get_rhythm_category
+
+def get_cost(sequence,nextSequence):
+	sequence = velocity_sequence_to_min_timespan(sequence)					# converting to the minimum time-span format
+	
+	if sequence[1:] == repeat([0],len(sequence)-1):		# null prototype
+		cost = 0
+	elif sequence == repeat([1],len(sequence)):			# filled prototype
+		cost = 1
+	elif sequence[0] == 1 and sequence[-1] == 0:			# run1 prototype
+		cost = 2
+	elif sequence[0] == 1 and (nextSequence == None or nextSequence[0] == 0):	# run2 prototype
+		cost = 2
+	elif sequence[-1] == 1 and nextSequence != None and nextSequence[0] == 1:		# upbeat prototype
+		cost = 3
+	elif sequence[0] == 0:							# syncopated prototype
+		cost = 5
+
+	return cost
+
+# This function calculates the normalised syncopation cost across the sub-sequences at a certain metrical level.
+def syncopation_perlevel(subSequences):
+	#print 'subSequences', subSequences
+	total = 0
+	for l in range(len(subSequences)-1):
+		#print 'cost', get_cost(subSequences[l], subSequences[l+1])
+		total = total + get_cost(subSequences[l], subSequences[l+1])
+	#print 'total this level', total
+	normalised = float(total)/(len(subSequences)-1)
+	
+	return normalised
+
+def get_syncopation(bar, parameters = None):
+	syncopation = None
+
+	binarySequence = bar.get_binary_sequence()
+	subdivisionSequence = bar.get_subdivision_sequence()
+
+	# PRS does not handle polyrhythms
+	if get_rhythm_category(binarySequence, subdivisionSequence) == 'poly':
+		print 'Warning: PRS model detects polyrhythms so returning None.'
+	else:
+		syncopation = 0
+
+		# retrieve the binary sequence in the next bar
+		if bar.get_next_bar() != None:
+			nextbarBinarySequence = bar.get_next_bar().get_binary_sequence()
+		else:
+			nextbarBinarySequence = None
+
+		# numberOfSubSeqs is the number of sub-sequences at a certain metrical level, initialised to be 1 (at the bar level)
+		numberOfSubSeqs = 1	
+		for subdivisor in subdivisionSequence:
+			# numberOfSubSeqs is product of all the subdivisors up to the current level
+			numberOfSubSeqs = numberOfSubSeqs * subdivisor
+			
+			# subdivision stops when the length of each sub-sequence is less than 2
+			if len(binarySequence)/numberOfSubSeqs >= 2:		
+				# generate sub-sequences and append the next bar sequence
+				subSequences = subdivide(ceiling(binarySequence), numberOfSubSeqs)	
+				subSequences.append(nextbarBinarySequence)
+				# adding syncopation at each metrical level to the total syncopation
+				#print 'per level', syncopation_perlevel(subSequences)
+				syncopation += syncopation_perlevel(subSequences)	
+			else:
+				break
+
+	return syncopation
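
The prototype costs used in get_cost above can be checked with a small stand-alone sketch; the half-bar sub-sequences below are hypothetical, and the final return is only a guard (binary input always matches one of the prototypes).

def prototype_cost(seq, next_seq):
    if all(v == 0 for v in seq[1:]):
        return 0   # null: nothing after the first position
    if all(v == 1 for v in seq):
        return 1   # filled
    if seq[0] == 1 and seq[-1] == 0:
        return 2   # run, ending off
    if seq[0] == 1 and (next_seq is None or next_seq[0] == 0):
        return 2   # run, next sub-sequence starts off
    if seq[-1] == 1 and next_seq is not None and next_seq[0] == 1:
        return 3   # upbeat into the next sub-sequence
    if seq[0] == 0:
        return 5   # syncopated: starts off the beat
    return 0       # guard (not reached for 0/1 input)

print(prototype_cost([0, 1, 1, 0], [1, 0, 0, 0]))  # -> 5
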
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/SG.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,88 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+
+from basic_functions import get_H, velocity_sequence_to_min_timespan, get_rhythm_category, upsample_velocity_sequence
+from parameter_setter import are_parameters_valid
+
+def get_syncopation(bar, parameters = None):
+	syncopation = None
+	velocitySequence = bar.get_velocity_sequence()
+	subdivisionSequence = bar.get_subdivision_sequence()
+
+	if get_rhythm_category(velocitySequence, subdivisionSequence) == 'poly':
+		print 'Warning: SG model detects polyrhythms so returning None.'
+	else:
+		#velocitySequence = velocity_sequence_to_min_timespan(velocitySequence)	# converting to the minimum time-span format
+
+		# set the defaults
+		Lmax  = 5
+		weightSequence = range(Lmax+1) # i.e. [0,1,2,3,4,5]
+		if parameters!= None:
+			if 'Lmax' in parameters:
+				Lmax = parameters['Lmax']				
+			if 'W' in parameters:
+				weightSequence = parameters['W']
+
+		if not are_parameters_valid(Lmax, weightSequence, subdivisionSequence):
+			print 'Error: the given parameters are not valid.'
+		else:
+			# generate the metrical weights of level Lmax, and upsample(stretch) the velocity sequence to match the length of H
+			H = get_H(weightSequence,subdivisionSequence, Lmax)
+			
+			velocitySequence = upsample_velocity_sequence(velocitySequence, len(H))
+
+			# The ave_dif_neighbours function calculates the (weighted) average of the difference between the note at a certain index and its neighbours in a certain metrical level
+			def ave_dif_neighbours(index, level):
+
+				averages = []
+				parameterGarma = 0.8
+				
+				# The find_pre function calculates the index of the previous neighbour at a certain metrical level.
+				def find_pre(index, level):
+					preIndex = (index - 1)%len(H)	# the % keeps the index within range(0, len(H))
+					while(H[preIndex] > level):
+						preIndex = (preIndex - 1)%len(H)
+					#print 'preIndex', preIndex
+					return preIndex
+
+				# The find_post function calculates the index of the next neighbour at a certain metrical level.
+				def find_post(index, level):
+					postIndex = (index + 1)%len(H)
+					while(H[postIndex] > level):
+						postIndex = (postIndex + 1)%len(H)
+					#print 'postIndex', postIndex
+					return postIndex
+				
+				# The dif function calculates a weighted difference between two notes (at positions index1 and index2) in the velocity sequence
+				def dif(index1,index2):
+					parameterBeta = 0.5
+					dif_v = velocitySequence[index1]-velocitySequence[index2]
+					dif_h = abs(H[index1]-H[index2])
+					dif = dif_v*(parameterBeta*dif_h/4+1-parameterBeta)
+					#print 'dif', dif
+					return dif
+
+				# From the highest to the lowest metrical levels where the current note resides, calculate the difference between the note and its neighbours at that level
+				for l in range(level, max(H)+1):
+					ave = (parameterGarma*dif(index,find_pre(index,l))+dif(index,find_post(index,l)) )/(1+parameterGarma)
+					averages.append(ave)
+				#print 'averages', averages
+				return averages
+
+			# if the upsampling was successfully done
+			if velocitySequence != None:
+				syncopation = 0			
+				# Calculate the syncopation value for each note
+				for index in range(len(velocitySequence)):
+					if velocitySequence[index] != 0: # Onset detected
+						h = H[index] 
+						# Syncopation potential according to its metrical level, which is equal to the metrical weight
+						potential = 1 - pow(0.5,h)
+						level = h 		# Metrical weight is equal to its metrical level
+						syncopation += min(ave_dif_neighbours(index, level))*potential
+			else:
+				print 'Try giving a bigger Lmax so that the rhythm sequence can be measured by the matching metrical weights sequence (H).'
+	return syncopation
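
One piece of the calculation above is easy to check in isolation: the per-onset syncopation potential grows with metrical depth as 1 - 0.5**h, so onsets at deeper (weaker) metrical positions can contribute more. A quick sketch:

# potentials for metrical levels 0..5 (0 = strongest position)
print([1 - pow(0.5, h) for h in range(6)])
# -> [0, 0.5, 0.75, 0.875, 0.9375, 0.96875]
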
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/TMC.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,85 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+
+from basic_functions import get_H, ceiling, velocity_sequence_to_min_timespan, get_rhythm_category
+from parameter_setter import are_parameters_valid
+
+# The get_metricity function calculates the metricity for a binary sequence with given sequence of metrical weights in a certain metrical level.
+def get_metricity(binarySequence, H):
+	metricity = 0
+	for m in range(len(binarySequence)):
+		metricity = metricity + binarySequence[m]*H[m]
+	return metricity
+
+# The get_max_metricity function calculates the maximum metricity for the same number of notes in a binary sequence.
+def get_max_metricity(binarySequence, H):
+	maxMetricity = 0
+	H.sort(reverse=True) # Sort the metrical weight sequence from large to small
+	for i in range(sum(binarySequence)):
+		maxMetricity = maxMetricity+H[i]
+	return maxMetricity
+
+# find the metrical level L that contains the same number of metrical positions as the length of the binary sequence
+# if the given Lmax is not big enough to analyse the given sequence, request a bigger Lmax
+def find_L(rhythmSequence, Lmax, weightSequence, subdivisionSequence):
+	L = Lmax
+
+	# initially assuming the Lmax is not big enough
+	needBiggerLmax = True 
+	
+	# from the lowest metrical level (Lmax) to the highest, find the matching metrical level that 
+	# has the same length as the length of binary sequence  
+	while L >= 0:
+		if len(get_H(weightSequence,subdivisionSequence, L)) == len(rhythmSequence):
+			needBiggerLmax = False
+			break
+		else:
+			L = L - 1
+
+	# if need a bigger Lmax, print error message and return None; otherwise return the matching metrical level L
+	if needBiggerLmax:
+		print 'Error: needs a bigger L_max (i.e. the lowest metrical level) to match the given rhythm sequence.'
+		L = None
+	
+	return L
+
+# The get_syncopation function calculates the syncopation value of the given sequence for TMC model. 
+#def get_syncopation(seq, subdivision_seq, weight_seq, L_max, rhythm_category):
+def get_syncopation(bar, parameters = None):
+	syncopation = None
+	binarySequence = bar.get_binary_sequence()
+	subdivisionSequence = bar.get_subdivision_sequence()
+
+	if get_rhythm_category(binarySequence, subdivisionSequence) == 'poly':
+		print 'Warning: TMC model detects polyrhythms so returning None.'
+	else:
+		
+		# set the defaults
+		Lmax  = 5
+		weightSequence = range(Lmax+1,0,-1) # i.e. [6,5,4,3,2,1]
+		
+		if parameters!= None:
+			if 'Lmax' in parameters:
+				Lmax = parameters['Lmax']				
+			if 'W' in parameters:
+				weightSequence = parameters['W']
+
+		if not are_parameters_valid(Lmax, weightSequence, subdivisionSequence):
+			print 'Error: the given parameters are not valid.'
+		else:
+			binarySequence = velocity_sequence_to_min_timespan(binarySequence)	# converting to the minimum time-span format
+			L = find_L(binarySequence, Lmax, weightSequence, subdivisionSequence) 
+			if L != None:
+				#? generate the metrical weights of the lowest level, 
+				#? using the last matching_level number of elements in the weightSequence, to make sure the last element is 1
+				H = get_H (weightSequence[-(L+1):], subdivisionSequence, L)
+				metricity = get_metricity(binarySequence, H)	# converting to binary sequence then calculate metricity
+				maxMetricity = get_max_metricity(binarySequence, H)
+
+				syncopation = maxMetricity - metricity
+				
+	return syncopation
+
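
A stand-alone sketch of the metricity difference computed above; the 8-position weight profile H is hypothetical (roughly what get_H produces for one bar of 4/4 with the default descending weights), and the example rhythm places every onset off the beat.

H = [4, 1, 2, 1, 3, 1, 2, 1]          # hypothetical metrical weights for 4/4
rhythm = [0, 1, 0, 1, 0, 1, 0, 1]     # onsets on the weak positions only

metricity = sum(r * h for r, h in zip(rhythm, H))             # 4
max_metricity = sum(sorted(H, reverse=True)[:sum(rhythm)])    # 4+3+2+2 = 11
print(max_metricity - metricity)                              # -> 7
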
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/TOB.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,35 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+
+from basic_functions import ceiling, find_divisor, is_prime, velocity_sequence_to_min_timespan
+
+def get_syncopation(bar, parameters = None):
+	binarySequence = velocity_sequence_to_min_timespan(bar.get_binary_sequence())
+	sequenceLength = len(binarySequence)
+
+	syncopation = 0
+
+	# if the length of the binary sequence is 1 or a prime number, syncopation is 0;
+	# otherwise the syncopation is calculated by adding up the number of off-beat notes
+	if not ( (sequenceLength == 1) or (is_prime(sequenceLength)) ):
+		# find all the divisors other than 1 and the length of this sequence
+		divisors = find_divisor(sequenceLength)		
+		del divisors[0]
+		del divisors[-1]
+
+		# on-beat positions are those whose index is divisible by a non-trivial divisor of sequenceLength; the rest are off-beat;
+		# on-beat positions are set to 0, off-beat positions are set to 1
+		offbeatness = [1]*sequenceLength			
+		for index in range(sequenceLength):
+			for d in divisors:
+				if index % d == 0:
+					offbeatness[index] = 0
+					break
+			#print 'offbeatness', offbeatness
+			# syncopation is the sum of the hadamard-product of the rhythm binary-sequence and the off-beatness 
+			syncopation += binarySequence[index]*offbeatness[index]
+	
+	return syncopation
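
A stand-alone sketch of the off-beatness count above, assuming a hypothetical 12-position binary rhythm: positions whose index is divisible by a non-trivial divisor of 12 are on-beat, and onsets anywhere else are counted.

def offbeat_count(binary_seq):
    n = len(binary_seq)
    divisors = [d for d in range(2, n) if n % d == 0]
    if not divisors:          # length 1 or prime: defined as 0
        return 0
    count = 0
    for i, onset in enumerate(binary_seq):
        if onset and all(i % d != 0 for d in divisors):
            count += 1
    return count

print(offbeat_count([1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0]))  # -> 2 (positions 5 and 7)
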
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/WNBD.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,128 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+from basic_functions import repeat, get_note_indices
+
+# To find the product of multiple numbers
+def cumu_multiply(numbers):
+	product = 1
+	for n in numbers:
+		product = product*n
+	return product
+
+def get_syncopation(bar, parameters = None):
+	syncopation = None
+	
+	noteSequence = bar.get_note_sequence()
+	barTicks = bar.get_bar_ticks()
+	subdivisionSequence = bar.get_subdivision_sequence()
+	strongBeatLevel = bar.get_beat_level()
+	
+	nextbarNoteSequence = None
+	if bar.get_next_bar() != None:
+		nextbarNoteSequence = bar.get_next_bar().get_note_sequence()
+
+	# calculate the tick position of each strong beat
+	numberOfBeats = cumu_multiply(subdivisionSequence[:strongBeatLevel+1])
+	beatIntervalTicks = barTicks/numberOfBeats
+	# beatsTicks represents the ticks for all the beats in the current bar and the first two beats in the next bar
+	beatsTicks = [i*beatIntervalTicks for i in range(numberOfBeats+2)] 
+	#print beatsTicks
+	totalSyncopation = 0
+	for note in noteSequence:
+	#	print note.to_string()
+		# find beatIndex such that note.startTime lies between beatsTicks[beatIndex] (inclusive) and beatsTicks[beatIndex+1] (exclusive)
+		beatIndex = 0
+		while note.startTime < beatsTicks[beatIndex] or note.startTime >= beatsTicks[beatIndex+1]:
+			beatIndex += 1
+
+	#	print beatIndex
+		# calculate the distance of this note to its nearest beat
+		distanceToBeatOnLeft = abs(note.startTime - beatsTicks[beatIndex])/float(beatIntervalTicks)
+		distanceToBeatOnRight = abs(note.startTime - beatsTicks[beatIndex+1])/float(beatIntervalTicks)
+		distanceToNearestBeat = min(distanceToBeatOnLeft,distanceToBeatOnRight)
+	#	print distanceToNearestBeat
+
+		# calculate the WNBD measure for this note, and add to total syncopation value for this bar
+		if distanceToNearestBeat == 0:	
+			totalSyncopation += 0
+		# or if this note is held on past the following beat, but ends on or before the later beat  
+		elif beatsTicks[beatIndex+1] < note.startTime+note.duration <= beatsTicks[beatIndex+2]:
+			totalSyncopation += float(2)/distanceToNearestBeat
+		else:
+			totalSyncopation += float(1)/distanceToNearestBeat
+	#	print totalSyncopation
+
+	return totalSyncopation
+
+#def get_syncopation(seq, subdivision_seq, strong_beat_level, postbar_seq):
+# def get_syncopation(bar, parameters = None):
+# 	syncopation = None
+	
+# 	binarySequence = bar.get_binary_sequence()
+# 	sequenceLength = len(binarySequence)
+# 	subdivisionSequence = bar.get_subdivision_sequence()
+# 	strongBeatLevel = bar.get_beat_level()
+# 	nextbarBinarySequence = None
+
+# 	if bar.get_next_bar() != None:
+# 		nextbarBinarySequence = bar.get_next_bar().get_binary_sequence()
+
+# 	numberOfBeats = cumu_multiply(subdivisionSequence[0:strongBeatLevel+1])	# numberOfBeats is the number of strong beats
+	
+# 	if sequenceLength % numberOfBeats != 0:
+# 		print 'Error: the length of the sequence is not divisible by the subdivision factor in the subdivision sequence.'
+# 	else:
+# 		# Find the indices of all the strong-beats
+# 		beatIndices = []
+# 		beatInterval = sequenceLength / numberOfBeats
+# 		for i in range(numberOfBeats+1):
+# 			beatIndices.append(i*beatInterval)
+# 		if nextbarBinarySequence != None:		# if there is a postbar_seq, add another two beats index for later calculation
+# 			beatIndices += [sequenceLength+beatInterval, sequenceLength+ 2* beatInterval]
+
+# 		noteIndices = get_note_indices(binarySequence)	# all the notes
+
+# 		# Calculate the WNBD measure for each note
+# 		def measure_pernote(noteIndices, nextNoteIndex):
+# 			# Find the nearest beats where this note locates - in [beat_indices[j], beat_indices[j+1]) 
+# 			j = 0
+# 			while noteIndices < beatIndices[j] or noteIndices >= beatIndices[j+1]:
+# 				j = j + 1
+			
+# 			# The distance of note to nearest beat normalised by the beat interval
+# 			distanceToNearestBeat = min(abs(noteIndices - beatIndices[j]), abs(noteIndices - beatIndices[j+1]))/float(beatInterval)
+
+# 			# if this note is on-beat
+# 			if distanceToNearestBeat == 0:	
+# 				measure = 0
+# 			# or if this note is held on past the following beat, but ends on or before the later beat  
+# 			elif beatIndices[j+1] < nextNoteIndex <= beatIndices[j+2]:
+# 				measure = float(2)/distanceToNearestBeat
+# 			else:
+# 				measure = float(1)/distanceToNearestBeat
+# 			return measure
+
+# 		total = 0
+# 		for i in range(len(noteIndices)):
+# 			# if this is the last note, end_time is the index of the following note in the next bar
+# 			if i == len(noteIndices)-1:
+# 				# if the next bar is not none or a bar of full rest, 
+# 				# the nextNoteIndex is the sum of sequence length in the current bar and the noteIndex in the next bar
+# 				if nextbarBinarySequence != None and nextbarBinarySequence != repeat([0],len(nextbarBinarySequence)):
+# 					nextNoteIndex = get_note_indices(nextbarBinarySequence)[0]+sequenceLength
+# 				# else when the next bar is none or full rest, end_time is the end of this sequence.
+# 				else:
+# 					nextNoteIndex = sequenceLength
+# 			# else this is not the last note, the nextNoteIndex is the following element in the noteIndices list
+# 			else:
+# 				nextNoteIndex = noteIndices[i+1]
+# 			# sum up the syncopation value for individual note at noteIndices[i]
+# 			total += measure_pernote(noteIndices[i],nextNoteIndex)
+
+# 		#syncopation = float(total) / len(note_indices)
+
+# 	# return the total value, leave the normalisation done in the end
+# 	return total
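
A minimal sketch of the per-note WNBD measure above, with hypothetical tick values (a beat every 4 ticks) and notes as (start, duration) pairs: on-beat notes score 0, a note held across the following beat but ending by the one after scores 2/d, and any other note scores 1/d, where d is the distance to the nearest beat in beat units.

def wnbd(notes, beat_ticks=4, n_beats=4):
    beats = [i * beat_ticks for i in range(n_beats + 2)]
    total = 0.0
    for start, duration in notes:
        k = 0
        while not (beats[k] <= start < beats[k + 1]):
            k += 1
        d = min(start - beats[k], beats[k + 1] - start) / float(beat_ticks)
        if d == 0:
            continue                      # on-beat note contributes nothing
        end = start + duration
        total += (2.0 if beats[k + 1] < end <= beats[k + 2] else 1.0) / d
    return total

# hypothetical bar: one note on the downbeat, one half a beat after beat 2
print(wnbd([(0, 4), (6, 4)]))  # -> 2 / 0.5 = 4.0
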
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/__init__.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,16 @@
+from syncopation import calculate_syncopation
+__all__ = [	'syncopation',
+			'basic_functions', 
+			'music_objects', 
+			'parameter_setter', 
+			'rhythm_parser',
+			'readmidi',
+			#models
+			'KTH', 
+			'LHL',
+			'PRS',
+			'SG',
+			'TMC',
+			'TOB',
+			'WNBD'
+			]
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/basic_functions.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,221 @@
+# This python file is a collection of basic functions that are used in the syncopation models. 
+
+import math
+
+# The concatenation function is used to concatenate two sequences.
+def concatenate(seq1,seq2):
+	return seq1+seq2
+
+# The repeat function concatenates 'times' copies of the given sequence.
+def repeat(seq,times):
+	new_seq = list(seq)
+	if times >= 1:
+		for i in range(times-1):
+			new_seq = concatenate(new_seq,seq)
+	else:
+		#print 'Error: repetition times needs to be no less than 1.'
+		new_seq = []
+	return new_seq
+
+# The subdivision function is to equally subdivide a sequence into 'divisor' number of segments.
+def subdivide(seq,divisor):
+	subSeq = []
+	if len(seq) % divisor != 0:
+		print 'Error: rhythmic sequence cannot be equally subdivided.'
+	else:
+		n = len(seq) / divisor
+		start , end = 0, n
+		for i in range(divisor):
+			subSeq.append(seq[start : end])
+			start = end
+			end = end + n	
+	return subSeq
+
+
+# The ceiling function is to round each number inside a sequence up to its nearest integer.
+def ceiling(seq):
+	seq_ceil = []
+	for s in seq:
+		seq_ceil.append(int(math.ceil(s)))
+	return seq_ceil
+
+# The find_divisor function returns a list of all possible divisors for a length of sequence.
+def find_divisor(number):
+	divisors = [1]
+	for i in range(2,number+1):
+		if number%i ==0:
+			divisors.append(i)
+	return divisors
+
+# The find_prime_factors function returns a list of all prime factors of the given number.
+def find_prime_factors(number):
+	primeFactors = find_divisor(number)
+	
+	# remove 1 because 1 is not a prime number
+	del primeFactors[0]
+
+	# traverse the divisor list in reverse and delete any entry that is not prime
+	for i in range(len(primeFactors)-1,0,-1):
+	#	print primeFactors[i], is_prime(primeFactors[i])
+		if not is_prime(primeFactors[i]):
+			del primeFactors[i]
+
+	return primeFactors
+
+def is_prime(number):
+	isPrime = True
+	# 0 and 1 are not prime numbers
+	if number < 2:
+		isPrime = False
+	# 2 is the only even prime number
+	elif number == 2:
+		pass
+	# all the other even numbers are non-prime
+	elif number % 2 == 0:
+		isPrime = False
+	else:
+		for odd in range(3, int(math.sqrt(number) + 1), 2):
+			if number % odd == 0:
+				isPrime = False
+	return isPrime
+
+# upsample a velocity sequence to a certain length, e.g. [1,1] to [1,0,0,0,1,0,0,0]
+def upsample_velocity_sequence(velocitySequence, length):
+	upsampledVelocitySequence = None
+	if length < len(velocitySequence):
+		print 'Error: the requested upsampling length needs to be longer than the velocity sequence.'
+	elif length % len(velocitySequence) != 0:
+		print 'Error: a velocity sequence can only be upsampled to an integer multiple of its own length.'
+	else:
+		upsampledVelocitySequence = [0]*length
+		scalingFactor = length/len(velocitySequence)
+		for index in range(len(velocitySequence)):
+			upsampledVelocitySequence[index*scalingFactor] = velocitySequence[index]
+	return upsampledVelocitySequence
+
+
+# convert a velocity sequence to its minimum time-span representation
+def velocity_sequence_to_min_timespan(velocitySequence):
+	from music_objects import VelocitySequence
+	minTimeSpanVelocitySeq = [1]
+	for divisors in find_divisor(len(velocitySequence)):
+		segments = subdivide(velocitySequence,divisors)
+		if len(segments)!=0:
+			del minTimeSpanVelocitySeq[:]
+			for s in segments:
+				minTimeSpanVelocitySeq.append(s[0])
+			if sum(minTimeSpanVelocitySeq) == sum(velocitySequence):
+				break
+	return VelocitySequence(minTimeSpanVelocitySeq)
+
+"""
+# convert a note sequence to its minimum time-span representation
+def note_sequence_to_min_timespan(noteSequence):
+	from music_objects import note_sequence_to_velocity_sequence
+	timeSpanTicks = len(note_sequence_to_velocity_sequence(noteSequence))
+#	print timeSpanTicks
+
+	barBinaryArray = [0]*(timeSpanTicks+1)
+	for note in noteSequence:
+		# mark note_on event (i.e. startTime) and note_off event (i.e. endTime = startTime + duration) as 1 in the barBinaryArray
+		barBinaryArray[note.startTime] = 1
+		barBinaryArray[note.startTime + note.duration] = 1
+
+	# convert the barBinaryArray to its minimum time-span representation
+	minBarBinaryArray = velocity_sequence_to_min_timespan(barBinaryArray[:-1])
+	print barBinaryArray
+	print minBarBinaryArray
+	delta_t = len(barBinaryArray)/len(minBarBinaryArray)
+
+	# scale the startTime and duration of each note by delta_t
+	for note in noteSequence:
+		note.startTime = note.startTime/delta_t
+		note.duration = note.duration/delta_t
+
+	return noteSequence
+"""
+
+# get_note_indices returns all the indices of all the notes in this velocity_sequence
+def get_note_indices(velocitySequence):
+	noteIndices = []
+
+	for index in range(len(velocitySequence)):
+		if velocitySequence[index] != 0:
+			noteIndices.append(index)
+
+	return noteIndices
+
+
+# The get_H function returns a sequence of metrical weights for a certain metrical level (horizontal),
+# given the sequence of metrical weights in a hierarchy (vertical) and a sequence of subdivisions.
+def get_H(weightSequence,subdivisionSequence, level):
+	H = []
+	#print len(weight_seq), len(subdivision_seq), level
+	if (level <= len(subdivisionSequence)-1) and (level <= len(weightSequence)-1):
+		if level == 0:
+			H = repeat([weightSequence[0]],subdivisionSequence[0])
+		else:
+			H_pre = get_H(weightSequence,subdivisionSequence,level-1)
+			for h in H_pre:
+				H = concatenate(H, concatenate([h], repeat([weightSequence[level]],subdivisionSequence[level]-1)))
+	else:
+		print 'Error: a subdivision factor or metrical weight is not defined for the requested metrical level.'
+	return H
+
+
+def calculate_bar_ticks(numerator, denominator, ticksPerQuarter):
+	return (numerator * ticksPerQuarter *4) / denominator
+
+
+def get_rhythm_category(velocitySequence, subdivisionSequence):
+	'''
+	The get_rhythm_category function detects the rhythm category: monorhythm or polyrhythm.
+	For a monorhythm, all prime factors of the length of the minimum time-span representation of the sequence are
+	elements of its subdivision_seq; otherwise it is a polyrhythm.
+	e.g. the prime factors of the polyrhythm 100100101010 in 4/4 are [2,3] but subdivision_seq = [1,2,2] for 4/4
+	'''
+	rhythmCategory = 'mono'
+	for f in find_prime_factors(len(velocity_sequence_to_min_timespan(velocitySequence))):
+		if not (f in subdivisionSequence): 
+			rhythmCategory = 'poly'
+			break
+	return rhythmCategory
+
+
+def string_to_sequence(inputString,typeFunction=float):
+	return map(typeFunction, inputString.split(','))
+
+# # The get_subdivision_seq function returns the subdivision sequence of several common time-signatures defined by GTTM, 
+# # or ask for the top three level of subdivision_seq manually set by the user.
+# def get_subdivision_seq(timesig, L_max):
+# 	subdivision_seq = []
+
+# 	if timesig == '2/4' or timesig == '4/4':
+# 		subdivision_seq = [1,2,2]
+# 	elif timesig == '3/4' or timesig == '3/8':
+# 		subdivision_seq = [1,3,2]
+# 	elif timesig == '6/8':
+# 		subdivision_seq = [1,2,3]
+# 	elif timesig == '9/8':
+# 		subdivision_seq = [1,3,3]
+# 	elif timesig == '12/8':
+# 		subdivision_seq = [1,4,3]
+# 	elif timesig == '5/4' or timesig == '5/8':
+# 		subdivision_seq = [1,5,2]
+# 	elif timesig == '7/4' or timesig == '7/8':
+# 		subdivision_seq = [1,7,2]
+# 	elif timesig == '11/4' or timesig == '11/8':
+# 		subdivision_seq = [1,11,2]
+# 	else:
+# 		print 'Time-signature',timesig,'is undefined. Please indicate subdivision sequence for this requested time-signature, e.g. [1,2,2] for 4/4 meter.'
+# 		for i in range(3):
+# 			s = int(input('Enter the subdivision factor at metrical level '+str(i)+':'))
+# 			subdivision_seq.append(s)
+
+# 	if L_max > 2:
+# 		subdivision_seq = subdivision_seq + [2]*(L_max-2)
+# 	else:
+# 		subdivision_seq = subdivision_seq[0:L_max+1]
+	
+# 	return subdivision_seq
+
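
The level-by-level expansion performed by get_H above can be illustrated with a short stand-alone sketch, assuming the 4/4-style subdivision sequence [1,2,2,2] and a hypothetical weight sequence [0,-1,-2,-3]:

def expand_H(weights, subdivisions, level):
    if level == 0:
        return [weights[0]] * subdivisions[0]
    H = []
    for h in expand_H(weights, subdivisions, level - 1):
        # keep the inherited weight, then fill the new positions with this level's weight
        H += [h] + [weights[level]] * (subdivisions[level] - 1)
    return H

print(expand_H([0, -1, -2, -3], [1, 2, 2, 2], 3))
# -> [0, -3, -2, -3, -1, -3, -2, -3]
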
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/midiparser.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,420 @@
+""" 
+midi.py -- MIDI classes and parser in Python 
+Placed into the public domain in December 2001 by Will Ware 
+Python MIDI classes: meaningful data structures that represent MIDI 
+events and other objects. You can read MIDI files to create such objects, or 
+generate a collection of objects and use them to write a MIDI file. 
+Helpful MIDI info: 
+http://crystal.apana.org.au/ghansper/midi_introduction/midi_file_form... 
+http://www.argonet.co.uk/users/lenny/midi/mfile.html 
+""" 
+import sys, string, types, exceptions 
+debugflag = 0 
+
+
+def showstr(str, n=16): 
+    for x in str[:n]: 
+        print ('%02x' % ord(x)), 
+    print
+     
+def getNumber(str, length): 
+    # MIDI uses big-endian for everything 
+    sum = 0 
+    for i in range(length): 
+        sum = (sum << 8) + ord(str[i]) 
+    return sum, str[length:]
+ 
+def getVariableLengthNumber(str): 
+    sum = 0 
+    i = 0 
+    while 1: 
+        x = ord(str[i]) 
+        i = i + 1 
+        sum = (sum << 7) + (x & 0x7F) 
+        if not (x & 0x80): 
+            return sum, str[i:] 
+
+def putNumber(num, length): 
+    # MIDI uses big-endian for everything 
+    lst = [ ] 
+    for i in range(length): 
+        n = 8 * (length - 1 - i) 
+        lst.append(chr((num >> n) & 0xFF)) 
+    return string.join(lst, "") 
+
+def putVariableLengthNumber(x): 
+    lst = [ ] 
+    while 1: 
+        y, x = x & 0x7F, x >> 7 
+        lst.append(chr(y + 0x80)) 
+        if x == 0: 
+            break 
+    lst.reverse() 
+    lst[-1] = chr(ord(lst[-1]) & 0x7f) 
+    return string.join(lst, "") 
+
+
+class EnumException(exceptions.Exception): 
+    pass 
+
+class Enumeration: 
+    def __init__(self, enumList): 
+        lookup = { } 
+        reverseLookup = { } 
+        i = 0 
+        uniqueNames = [ ] 
+        uniqueValues = [ ] 
+        for x in enumList: 
+            if type(x) == types.TupleType: 
+                x, i = x 
+            if type(x) != types.StringType: 
+                raise EnumException, "enum name is not a string: " + x 
+            if type(i) != types.IntType: 
+                raise EnumException, "enum value is not an integer: " + i 
+            if x in uniqueNames: 
+                raise EnumException, "enum name is not unique: " + x 
+            if i in uniqueValues: 
+                raise EnumException, "enum value is not unique for " + x 
+            uniqueNames.append(x) 
+            uniqueValues.append(i) 
+            lookup[x] = i 
+            reverseLookup[i] = x 
+            i = i + 1 
+        self.lookup = lookup 
+        self.reverseLookup = reverseLookup 
+    def __add__(self, other): 
+        lst = [ ] 
+        for k in self.lookup.keys(): 
+            lst.append((k, self.lookup[k])) 
+        for k in other.lookup.keys(): 
+            lst.append((k, other.lookup[k])) 
+        return Enumeration(lst) 
+    def hasattr(self, attr): 
+        return self.lookup.has_key(attr) 
+    def has_value(self, attr): 
+        return self.reverseLookup.has_key(attr) 
+    def __getattr__(self, attr): 
+        if not self.lookup.has_key(attr): 
+            raise AttributeError 
+        return self.lookup[attr] 
+    def whatis(self, value): 
+        return self.reverseLookup[value] 
+
+
+channelVoiceMessages = Enumeration([("NOTE_OFF", 0x80), 
+                                    ("NOTE_ON", 0x90), 
+                                    ("POLYPHONIC_KEY_PRESSURE", 0xA0), 
+                                    ("CONTROLLER_CHANGE", 0xB0), 
+                                    ("PROGRAM_CHANGE", 0xC0), 
+                                    ("CHANNEL_KEY_PRESSURE", 0xD0), 
+                                    ("PITCH_BEND", 0xE0)]) 
+
+channelModeMessages = Enumeration([("ALL_SOUND_OFF", 0x78), 
+                                   ("RESET_ALL_CONTROLLERS", 0x79), 
+                                   ("LOCAL_CONTROL", 0x7A), 
+                                   ("ALL_NOTES_OFF", 0x7B), 
+                                   ("OMNI_MODE_OFF", 0x7C), 
+                                   ("OMNI_MODE_ON", 0x7D), 
+                                   ("MONO_MODE_ON", 0x7E), 
+                                   ("POLY_MODE_ON", 0x7F)]) 
+metaEvents = Enumeration([("SEQUENCE_NUMBER", 0x00), 
+                          ("TEXT_EVENT", 0x01), 
+                          ("COPYRIGHT_NOTICE", 0x02), 
+                          ("SEQUENCE_TRACK_NAME", 0x03), 
+                          ("INSTRUMENT_NAME", 0x04), 
+                          ("LYRIC", 0x05), 
+                          ("MARKER", 0x06), 
+                          ("CUE_POINT", 0x07), 
+                          ("MIDI_CHANNEL_PREFIX", 0x20), 
+                          ("MIDI_PORT", 0x21), 
+                          ("END_OF_TRACK", 0x2F), 
+                          ("SET_TEMPO", 0x51), 
+                          ("SMTPE_OFFSET", 0x54), 
+                          ("TIME_SIGNATURE", 0x58), 
+                          ("KEY_SIGNATURE", 0x59), 
+                          ("SEQUENCER_SPECIFIC_META_EVENT", 0x7F)]) 
+
+
+# runningStatus appears to want to be an attribute of a MidiTrack. But 
+# it doesn't seem to do any harm to implement it as a global. 
+runningStatus = None 
+class MidiEvent: 
+    def __init__(self, track): 
+        self.track = track 
+        self.time = None 
+        self.channel = self.pitch = self.velocity = self.data = None 
+    def __cmp__(self, other): 
+        # assert self.time != None and other.time != None 
+        return cmp(self.time, other.time) 
+    def __repr__(self): 
+        r = ("<MidiEvent %s, t=%s, track=%s, channel=%s" % 
+             (self.type, 
+              repr(self.time), 
+              self.track.index, 
+              repr(self.channel))) 
+        for attrib in ["pitch", "data", "velocity"]: 
+            if getattr(self, attrib) != None: 
+                r = r + ", " + attrib + "=" + repr(getattr(self, attrib)) 
+        return r + ">" 
+    def read(self, time, str): 
+        global runningStatus 
+        self.time = time 
+        # do we need to use running status? 
+        if not (ord(str[0]) & 0x80): 
+            str = runningStatus + str 
+        runningStatus = x = str[0] 
+        x = ord(x) 
+        y = x & 0xF0 
+        z = ord(str[1]) 
+        if channelVoiceMessages.has_value(y): 
+            self.channel = (x & 0x0F) + 1 
+            self.type = channelVoiceMessages.whatis(y) 
+            if (self.type == "PROGRAM_CHANGE" or 
+                self.type == "CHANNEL_KEY_PRESSURE"): 
+                self.data = z 
+                return str[2:] 
+            else: 
+                self.pitch = z 
+                self.velocity = ord(str[2]) 
+                channel = self.track.channels[self.channel - 1] 
+                if (self.type == "NOTE_OFF" or 
+                    (self.velocity == 0 and self.type == "NOTE_ON")): 
+                    channel.noteOff(self.pitch, self.time) 
+                elif self.type == "NOTE_ON": 
+                    channel.noteOn(self.pitch, self.time, self.velocity) 
+                return str[3:] 
+        elif y == 0xB0 and channelModeMessages.has_value(z): 
+            self.channel = (x & 0x0F) + 1 
+            self.type = channelModeMessages.whatis(z) 
+            if self.type == "LOCAL_CONTROL": 
+                self.data = (ord(str[2]) == 0x7F) 
+            elif self.type == "MONO_MODE_ON": 
+                self.data = ord(str[2]) 
+            return str[3:] 
+        elif x == 0xF0 or x == 0xF7: 
+            self.type = {0xF0: "F0_SYSEX_EVENT", 
+                         0xF7: "F7_SYSEX_EVENT"}[x] 
+            length, str = getVariableLengthNumber(str[1:]) 
+            self.data = str[:length] 
+            return str[length:] 
+        elif x == 0xFF: 
+            if not metaEvents.has_value(z): 
+                print "Unknown meta event: FF %02X" % z 
+                sys.stdout.flush() 
+                raise "Unknown midi event type" 
+            self.type = metaEvents.whatis(z) 
+            length, str = getVariableLengthNumber(str[2:]) 
+            self.data = str[:length] 
+            return str[length:] 
+        raise "Unknown midi event type" 
+    def write(self): 
+        sysex_event_dict = {"F0_SYSEX_EVENT": 0xF0, 
+                            "F7_SYSEX_EVENT": 0xF7} 
+        if channelVoiceMessages.hasattr(self.type): 
+            x = chr((self.channel - 1) + 
+                    getattr(channelVoiceMessages, self.type)) 
+            if (self.type != "PROGRAM_CHANGE" and 
+                self.type != "CHANNEL_KEY_PRESSURE"): 
+                data = chr(self.pitch) + chr(self.velocity) 
+            else: 
+                data = chr(self.data) 
+            return x + data 
+        elif channelModeMessages.hasattr(self.type): 
+            x = getattr(channelModeMessages, self.type) 
+            x = (chr(0xB0 + (self.channel - 1)) + 
+                 chr(x) + 
+                 chr(self.data)) 
+            return x 
+        elif sysex_event_dict.has_key(self.type): 
+            str = chr(sysex_event_dict[self.type]) 
+            str = str + putVariableLengthNumber(len(self.data)) 
+            return str + self.data 
+        elif metaEvents.hasattr(self.type): 
+            str = chr(0xFF) + chr(getattr(metaEvents, self.type)) 
+            str = str + putVariableLengthNumber(len(self.data)) 
+            return str + self.data 
+        else: 
+            raise "unknown midi event type: " + self.type 
+
+
+
+""" 
+register_note() is a hook that can be overloaded from a script that 
+imports this module. Here is how you might do that, if you wanted to 
+store the notes as tuples in a list. Including the distinction 
+between track and channel offers more flexibility in assigning voices. 
+import midi 
+notelist = [ ] 
+def register_note(t, c, p, v, t1, t2): 
+    notelist.append((t, c, p, v, t1, t2)) 
+midi.register_note = register_note 
+""" 
+def register_note(track_index, channel_index, pitch, velocity, 
+                  keyDownTime, keyUpTime): 
+    pass 
+
+
+
+class MidiChannel: 
+    """A channel (together with a track) provides the continuity connecting 
+    a NOTE_ON event with its corresponding NOTE_OFF event. Together, those 
+    define the beginning and ending times for a Note.""" 
+    def __init__(self, track, index): 
+        self.index = index 
+        self.track = track 
+        self.pitches = { } 
+    def __repr__(self): 
+        return "<MIDI channel %d>" % self.index 
+    def noteOn(self, pitch, time, velocity): 
+        self.pitches[pitch] = (time, velocity) 
+    def noteOff(self, pitch, time): 
+        if self.pitches.has_key(pitch): 
+            keyDownTime, velocity = self.pitches[pitch] 
+            register_note(self.track.index, self.index, pitch, velocity, 
+                          keyDownTime, time) 
+            del self.pitches[pitch] 
+        # The case where the pitch isn't in the dictionary is illegal, 
+        # I think, but we probably better just ignore it. 
+
+
+class DeltaTime(MidiEvent): 
+    type = "DeltaTime" 
+    def read(self, oldstr): 
+        self.time, newstr = getVariableLengthNumber(oldstr) 
+        return self.time, newstr 
+    def write(self): 
+        str = putVariableLengthNumber(self.time) 
+        return str 
+
+
+class MidiTrack: 
+    def __init__(self, index): 
+        self.index = index 
+        self.events = [ ] 
+        self.channels = [ ] 
+        self.length = 0 
+        for i in range(16): 
+            self.channels.append(MidiChannel(self, i+1)) 
+    def read(self, str): 
+        time = 0 
+        assert str[:4] == "MTrk" 
+        length, str = getNumber(str[4:], 4) 
+        self.length = length 
+        mystr = str[:length] 
+        remainder = str[length:] 
+        while mystr: 
+            delta_t = DeltaTime(self) 
+            dt, mystr = delta_t.read(mystr) 
+            time = time + dt 
+            self.events.append(delta_t) 
+            e = MidiEvent(self) 
+            mystr = e.read(time, mystr) 
+            self.events.append(e) 
+        return remainder 
+    def write(self): 
+        time = self.events[0].time 
+        # build str using MidiEvents 
+        str = "" 
+        for e in self.events: 
+            str = str + e.write() 
+        return "MTrk" + putNumber(len(str), 4) + str 
+    def __repr__(self): 
+        r = "<MidiTrack %d -- %d events\n" % (self.index, len(self.events)) 
+        for e in self.events: 
+            r = r + "    " + `e` + "\n" 
+        return r + "  >" 
+
+
+
+class MidiFile: 
+    def __init__(self): 
+        self.file = None 
+        self.format = 1 
+        self.tracks = [ ] 
+        self.ticksPerQuarterNote = None 
+        self.ticksPerSecond = None 
+    def open(self, filename, attrib="rb"): 
+        if filename == None: 
+            if attrib in ["r", "rb"]: 
+                self.file = sys.stdin 
+            else: 
+                self.file = sys.stdout 
+        else: 
+            self.file = open(filename, attrib) 
+    def __repr__(self): 
+        r = "<MidiFile %d tracks\n" % len(self.tracks) 
+        for t in self.tracks: 
+            r = r + "  " + `t` + "\n" 
+        return r + ">" 
+    def close(self): 
+        self.file.close() 
+    def read(self): 
+        self.readstr(self.file.read()) 
+    def readstr(self, str): 
+        assert str[:4] == "MThd" 
+        length, str = getNumber(str[4:], 4) 
+        assert length == 6 
+        format, str = getNumber(str, 2) 
+        self.format = format 
+        assert format == 0 or format == 1   # dunno how to handle 2 
+        numTracks, str = getNumber(str, 2) 
+        division, str = getNumber(str, 2) 
+        if division & 0x8000: 
+            framesPerSecond = -((division >> 8) | -128) 
+            ticksPerFrame = division & 0xFF 
+            assert ticksPerFrame == 24 or ticksPerFrame == 25 or \
+                   ticksPerFrame == 29 or ticksPerFrame == 30 
+            if ticksPerFrame == 29: ticksPerFrame = 30  # drop frame 
+            self.ticksPerSecond = ticksPerFrame * framesPerSecond 
+        else: 
+            self.ticksPerQuarterNote = division & 0x7FFF 
+        for i in range(numTracks): 
+            trk = MidiTrack(i) 
+            str = trk.read(str) 
+            self.tracks.append(trk) 
+    def write(self): 
+        self.file.write(self.writestr()) 
+    def writestr(self): 
+        division = self.ticksPerQuarterNote 
+        # Don't handle ticksPerSecond yet, too confusing 
+        assert (division & 0x8000) == 0 
+        str = "MThd" + putNumber(6, 4) + putNumber(self.format, 2) 
+        str = str + putNumber(len(self.tracks), 2) 
+        str = str + putNumber(division, 2) 
+        for trk in self.tracks: 
+            str = str + trk.write() 
+        return str 
+
+
+def main(argv): 
+    global debugflag 
+    import getopt 
+    infile = None 
+    outfile = None 
+    printflag = 0 
+    optlist, args = getopt.getopt(argv[1:], "i:o:pd") 
+    for (option, value) in optlist: 
+        if option == '-i': 
+            infile = value 
+        elif option == '-o': 
+            outfile = value 
+        elif option == '-p': 
+            printflag = 1 
+        elif option == '-d': 
+            debugflag = 1 
+    m = MidiFile() 
+    m.open(infile) 
+    m.read() 
+    m.close()
+    if printflag: 
+        print m 
+    else: 
+        m.open(outfile, "wb") 
+        m.write() 
+        m.close() 
+
+
+if __name__ == "__main__": 
+    main(sys.argv) 
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/music_objects.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,256 @@
+
+from basic_functions import ceiling, string_to_sequence, calculate_bar_ticks, velocity_sequence_to_min_timespan
+import parameter_setter 
+import rhythm_parser 
+
+class Note():
+	def __init__(self, firstarg = None, duration = None, velocity = None):
+		self.startTime = 0
+		self.duration = 0
+		self.velocity = 0
+
+		if firstarg != None:
+			if isinstance(firstarg,basestring):
+				intlist = string_to_sequence(firstarg,int)
+				self.startTime = intlist[0]
+				self.duration = intlist[1]
+				self.velocity = intlist[2]
+			elif isinstance(firstarg,int):
+				self.startTime = firstarg
+
+		if duration != None:
+			self.duration = duration
+		if velocity != None:
+			self.velocity = velocity
+
+
+	def to_string(self):
+		return "(%d,%d,%f)" %(self.startTime, self.duration, self.velocity)
+
+
+# NoteSequence is a list of Note
+class NoteSequence(list):
+	def __init__(self, noteSequenceString = None):
+		if noteSequenceString != None:
+			self.string_to_note_sequence(noteSequenceString)
+
+	def string_to_note_sequence(self, noteSequenceString):
+		noteSequenceString = rhythm_parser.discard_spaces(noteSequenceString)
+		# try:
+			# Turning "(1,2,3),(4,5,6),(7,8,9)" into ["1,2,3","4,5,6","7,8,9"]
+		listStrings = noteSequenceString[1:-1].split("),(")
+		for localString in listStrings:
+			self.append(Note(localString))
+
+	def to_string(self):
+		noteSequenceString = ""
+		for note in self:
+			noteSequenceString += note.to_string() + ","
+		return noteSequenceString[:-1]
+
+
+class NormalisedVelocityValueOutOfRange(Exception):
+	def __init__(self, value):
+		self.value = value
+	def __str__(self):
+		return repr(self.value)
+
+# VelocitySequence is a list of float numbers
+class VelocitySequence(list):
+	def __init__(self, velocitySequence = None):
+		if velocitySequence != None:
+			if isinstance(velocitySequence,basestring):
+				self.string_to_velocity_sequence(velocitySequence)
+			elif isinstance(velocitySequence, list):
+				self+=velocitySequence
+
+	def string_to_velocity_sequence(self,inputString):
+		
+		def convert_velocity_value(argstring):
+			value = float(argstring)
+			if value>=0 and value<=1:
+				return value
+			else:
+				raise NormalisedVelocityValueOutOfRange("Value: "+argstring+" in " + inputString)
+
+		self.extend(string_to_sequence(inputString,convert_velocity_value))
+
+
+	def to_string(self):
+		return str(velocity_sequence_to_min_timespan(self))[1:-1].replace(" ","")
+
+
+def velocity_sequence_to_note_sequence(velocitySequence, nextbarVelocitySequence = None):
+	
+	noteSequence = NoteSequence()
+
+	for index in range(len(velocitySequence)):
+		if (velocitySequence[index]!= 0): # onset detected
+			startTime = index			
+			velocity = velocitySequence[index]
+
+			# if there are previous notes added
+			if( len(noteSequence) > 0):
+				previousNote = noteSequence[-1]
+				previousNote.duration = startTime - previousNote.startTime
+
+			# add the current note into note sequence
+			noteSequence.append( Note(startTime, 0, velocity) )
+
+	# to set the duration for the last note
+	if( len(noteSequence) > 0):
+		lastNote = noteSequence[-1]
+		
+		if nextbarVelocitySequence == None:
+			lastNote.duration = len(velocitySequence) - lastNote.startTime
+		else:
+			nextNoteStartTime = next((index for index, v in enumerate(nextbarVelocitySequence) if v), None)
+			lastNote.duration = len(velocitySequence) + nextNoteStartTime-lastNote.startTime
+
+
+	return noteSequence
+
+
+def note_sequence_to_velocity_sequence(noteSequence, timespanTicks = None):
+
+	velocitySequence = VelocitySequence()
+	
+	previousNoteStartTime = -1
+
+	for note in noteSequence:
+		
+		interOnsetInterval = note.startTime - previousNoteStartTime	
+		velocitySequence += [0]*(interOnsetInterval-1)	
+		velocitySequence += [note.velocity]
+
+		previousNoteStartTime = note.startTime
+
+	if timespanTicks!=None:
+		velocitySequence += [0]*(timespanTicks - len(velocitySequence))
+	else:
+		velocitySequence += [0]*(noteSequence[-1].duration-1)
+
+	# normalising velocity sequence between 0-1
+	if max(velocitySequence)>0:
+		velocitySequence = VelocitySequence([float(v)/max(velocitySequence) for v in velocitySequence])
+
+	return velocitySequence
+
+
+class BarList(list):
+	def append(self,bar):
+		if(len(self)>0):
+			bar.set_previous_bar(self[-1])
+			self[-1].set_next_bar(bar)
+		super(BarList, self).append(bar)
+
+	def concat(self, barList):
+		while(len(barList)!=0):
+			localbar = barList[0]
+			self.append(localbar)
+			barList.remove(localbar)
+
+
+class Bar:
+	def __init__(self, rhythmSequence, timeSignature, ticksPerQuarter=None, qpmTempo=None, nextBar=None, prevBar=None):
+		if isinstance(rhythmSequence, NoteSequence):
+			self.noteSequence = rhythmSequence
+			self.velocitySequence = None 
+		elif isinstance(rhythmSequence, VelocitySequence):
+			self.velocitySequence = rhythmSequence
+			self.noteSequence = None 
+
+		self.tpq = ticksPerQuarter
+		self.qpm = qpmTempo
+		if isinstance(timeSignature, basestring):
+			self.timeSignature = TimeSignature(timeSignature)
+		else:
+			self.timeSignature = timeSignature
+		self.nextBar = nextBar
+		self.prevBar = prevBar
+
+	def get_note_sequence(self):
+		if self.noteSequence == None:
+			nextbarVelocitySequence = None
+			if self.nextBar != None:
+				nextbarVelocitySequence = self.nextBar.get_velocity_sequence()
+			self.noteSequence = velocity_sequence_to_note_sequence(self.velocitySequence, nextbarVelocitySequence)
+		return self.noteSequence
+
+	def get_velocity_sequence(self):
+		if self.velocitySequence == None:
+			self.velocitySequence = note_sequence_to_velocity_sequence(self.noteSequence, self.get_bar_ticks())
+		return self.velocitySequence
+
+	def get_binary_sequence(self):
+		return ceiling(self.get_velocity_sequence())
+
+	def get_next_bar(self):
+		return self.nextBar
+
+	def get_previous_bar(self):
+		return self.prevBar
+
+	def set_next_bar(self, bar):
+		self.nextBar = bar
+
+	def set_previous_bar(self, bar):
+		self.prevBar = bar		
+
+	def get_subdivision_sequence(self):
+		return self.timeSignature.get_subdivision_sequence()
+
+	def get_beat_level(self):
+		return self.timeSignature.get_beat_level()
+
+	def get_time_signature(self):
+		return self.timeSignature
+
+	# return the length of a bar in time units (ticks)
+	def get_bar_ticks(self):
+		return calculate_bar_ticks(self.timeSignature.get_numerator(),self.timeSignature.get_denominator(), self.tpq)
+
+	def is_empty(self):
+		if max(self.get_velocity_sequence())>0:
+			return False
+		else:
+			return True
+
+	def to_string(self, sequenceType=None):
+		output = "t{"+self.timeSignature.to_string()+"}"
+		prev = self.get_previous_bar()
+		if prev!=None:
+			# compare by value: TimeSignature does not define __eq__, so comparing the
+			# objects directly would only match when both bars share the same instance
+			if prev.get_time_signature().to_string()==self.get_time_signature().to_string():
+				output=""
+
+		if sequenceType==None or sequenceType=="v":
+			output += "v{"+self.get_velocity_sequence().to_string()+"}"
+		else:
+			output += "y{"+self.get_note_sequence().to_string()+"}"
+		return output
+
+
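+# Illustrative usage sketch (not part of the original code), assuming "4/4" is a
+# registered time signature in the parameter_setter table:
+#
+#	bar = Bar(VelocitySequence("1,0,1,0"), "4/4")
+#	notes = bar.get_note_sequence()	# note sequence derived lazily from the velocities
+#	print bar.to_string()		# time-signature and velocity fields in .rhy syntax
+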
+class TimeSignature():
+	def __init__(self, inputString):
+		if inputString in parameter_setter.read_time_signature():
+			self.tsString = inputString
+		else:
+			print "Error: undefined time-signature: ", inputString
+			raise NullTimeSignatureError
+
+	def get_subdivision_sequence(self):
+		return parameter_setter.timeSignatureBase[self.tsString][0]
+	
+	def get_beat_level(self):
+		return parameter_setter.timeSignatureBase[self.tsString][1]
+
+	def get_numerator(self):
+		return int(self.tsString.split('/')[0])
+			
+	def get_denominator(self):
+		return int(self.tsString.split('/')[1])
+
+	def to_string(self):
+		return self.tsString
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/parameter_setter.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,113 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+'''
+
+# Set the parameters: time-signature, subdivision-sequence, strong-beat-level; Lmax; weight-sequence
+# Important condition: Lmax must not exceed the length of the subdivision-sequence or the length of the weight-sequence
+
+
+# {'key': time-signature} :  
+# {'value': [subdivision-sequence, theoretical beat-level represented by index in the subdivision-sequence list]}
+timeSignatureBase = {
+	'2/2': [[1,2,2,2,2,2],1],
+	'3/2': [[1,3,2,2,2,2],1],
+	'4/2': [[1,2,2,2,2,2],1],
+	'2/4': [[1,2,2,2,2,2],1],
+	'3/4': [[1,3,2,2,2,2],1],
+	'4/4': [[1,2,2,2,2,2],2],
+	'5/4': [[1,5,2,2,2,2],1],
+	'7/4': [[1,7,2,2,2,2],1],
+	'3/8': [[1,3,2,2,2,2],1],
+	'5/8': [[1,5,2,2,2,2],1],
+	'6/8': [[1,2,3,2,2,2],1],
+	'9/8': [[1,3,3,2,2,2],1],
+	'12/8':[[1,2,2,3,2,2],2],	
+}
+
+
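+# For example, timeSignatureBase['6/8'] is [[1,2,3,2,2,2], 1]: the bar first splits
+# into 2, then each half into 3, then into 2s; the theoretical beat level is index 1
+# of that subdivision sequence (i.e. the two-beats-per-bar level).
+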
+def add_time_signature(timeSignature, subdivisionSequence, beatLevel):
+	if is_time_signature_valid(timeSignature,subdivisionSequence,beatLevel):
+		if timeSignature in timeSignatureBase:
+			print 'This time-signature already exists.'
+		else:
+			timeSignatureBase[timeSignature] = [subdivisionSequence, beatLevel]
+			write_time_signature()
+
+def update_time_signature(timeSignature, subdivisionSequence, beatLevel):
+	if is_time_signature_valid(timeSignature,subdivisionSequence,beatLevel):
+		if timeSignature in timeSignatureBase:
+			print 'Original settings for ', timeSignature, ':',timeSignatureBase[timeSignature] 
+			timeSignatureBase[timeSignature] = [subdivisionSequence, beatLevel]
+			print 'Changed into:',timeSignatureBase[timeSignature]
+			write_time_signature()
+
+def is_time_signature_valid(timeSignature, subdivisionSequence, beatLevel):
+	isValid = False
+	if ('/' not in timeSignature) or (not timeSignature.split('/')[0].isdigit()) or (not timeSignature.split('/')[1].isdigit()):
+		print 'Error: invalid time-signature. Please give it as a fraction, e.g. 4/4, 6/8 or 3/4.'
+	elif subdivisionSequence != [s for s in subdivisionSequence if isinstance(s,int)]:
+		print 'Error: invalid subdivision sequence. Please give it as a list of integers, e.g. [1,2,2,2,2].'
+	elif beatLevel >= len(subdivisionSequence):
+		print 'Error: beat-level exceeds the range of subdivision sequence list.'
+	else:
+		isValid = True
+	return isValid
+
+def write_time_signature():
+	import cPickle as pickle
+	timeSigFile = open('TimeSignature.pkl', 'wb')
+	pickle.dump(timeSignatureBase, timeSigFile)
+	timeSigFile.close()
+
+def read_time_signature():
+	import cPickle as pickle
+	timeSigFile = open('TimeSignature.pkl','rb')
+	data = pickle.load(timeSigFile)
+	timeSigFile.close()
+	return data
+
+def print_time_signature_base():
+	data = read_time_signature()
+	for timeSignature, settings in data.items():
+		print timeSignature, settings
+
+
+def are_parameters_valid(Lmax, weightSequence, subdivisionSequence):
+
+	# is_Lmax_valid() checks:
+	# 1. if Lmax is a positive integer
+	# 2. if Lmax does not exceed the range of weightSequence and subdivisionSequence
+	def is_Lmax_valid():
+		isValid = False
+		if isinstance(Lmax,int) and Lmax > 0:
+			if Lmax <= len(subdivisionSequence)-1:
+				if Lmax <= len(weightSequence)-1:
+					isValid = True
+				else:
+					print 'Error: Lmax exceeds the length of weight-sequence. Either reduce Lmax, or provide a new weight-sequence whose length is greater than or equal to Lmax.'
+			else:
+				print 'Error: Lmax exceeds the length of subdivision-sequence. Either reduce Lmax, or extend the subdivision-sequence by updating the time-signature (see the update_time_signature function).'
+		else:	
+			print 'Error: Lmax needs to be a positive integer.'
+		return isValid
+
+	# is_weight_sequence_valid() checks:
+	# 1. weightSequence is a list of integers
+	# 2. the length of weightSequence is no less than Lmax
+	def is_weight_sequence_valid():
+		isValid = False
+		if isinstance(weightSequence,list) and weightSequence == [i for i in weightSequence if isinstance(i,int)]:
+			if len(weightSequence) >= Lmax:
+				isValid = True
+			else:
+				print 'Error: the length of weight-sequence needs to be greater than or equal to Lmax.'
+		else:
+			print 'Error: the weight-sequence needs to be a list of integers.'
+		return isValid
+
+
+	if is_weight_sequence_valid() and is_Lmax_valid():
+		return True
+	else:
+		return False
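+
+# Illustrative usage sketch (not part of the original code), using the 4/4
+# subdivision sequence from timeSignatureBase above:
+#
+#	are_parameters_valid(5, [0,1,2,3,4,5], [1,2,2,2,2,2])	# True
+#	are_parameters_valid(6, [0,1,2,3,4,5], [1,2,2,2,2,2])	# False (Lmax too large)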
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/readmidi.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,381 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Mar 21 22:19:52 2015
+
+@author: christopherh
+"""
+
+from midiparser import MidiFile, MidiTrack, DeltaTime, MidiEvent
+#from RhythmParser import Bar
+
+from music_objects import *
+from basic_functions import *
+
+
+
+
+def read_midi_file(filename):
+	""" open and read a MIDI file, return a MidiFile object """
+
+	#create a midifile object, open and read a midi file
+	midiFile = MidiFile()
+	midiFile.open(filename, 'rb')
+	midiFile.read()
+	midiFile.close()
+
+	return midiFile
+
+# def get_bars(midiFile, trackindex=1):
+# 	""" returns a list of bar objects from a MidiFile object """
+
+# 	# select a track to extract (default = 1, ignoring dummy track 0)
+# 	track = midiFile.tracks[trackindex] 
+# 	eventIndex = 0
+# 	numNotes = 0
+
+# 	noteonlist = []
+# 	noteOnFound==True
+
+# 	while noteOnFound==True:
+# 		(noteOnIndex, noteOnDelta, noteOnFound) = self.find_event(track, eventIndex, lambda e: e.type == 'NOTE_ON')
+# 		noteEvent = track.events[noteOnIndex]
+# 		eventIndex = noteOnIndex + 1
+            	
+
+#find_event(x.tracks[0], 0, lambda e: (e.type == 'NOTE_ON') | (e.type == 'KEY_SIGNATURE') | (e.type == "TIME_SIGNATURE"))
+
+'''
+	#read midiFile
+	
+	
+
+	run through selected track getting notes out to build bars
+
+'''
+	
+
+
+def get_bars_from_midi(midiFile):
+
+	# couple of inner functions to tidy up getting the initial values of
+	# tempo and time signature
+	def get_initial_tempo(timeList):
+
+		tempo = None
+		i=0
+		# Find the initial time and tempo:
+		while(tempo == None and i<len(timeList)):
+			event = timeList[i]
+			i = i + 1
+			if event.type=="SET_TEMPO":
+				if tempo==None:
+					tempo = midi_event_to_qpm_tempo(event)
+
+		if tempo==None:
+			tempo = 120
+
+		return tempo
+
+	def get_initial_time_signature(timeList):
+
+		timesig = None
+		i=0
+		# Find the initial time and tempo:
+		while(timesig == None and i<len(timeList)):
+			event = timeList[i]
+			i = i + 1
+			if event.type=="TIME_SIGNATURE":
+				if timesig==None:
+					timesig = midi_event_to_time_signature(event)
+
+		if timesig==None:
+			timesig = TimeSignature("4/4")
+			
+		return timesig
+
+
+	def get_time_signature(timeList,barStartTime, barEndTime, currentTimeSignature = None):
+		
+		timesig = None
+		i=0
+		
+		while(i<len(timeList)):
+			# run through list until we find the most recent time signature
+			# before the end of the current bar
+			event = timeList[i]
+			i = i + 1
+			if event.time>=barEndTime:
+				break
+
+			if event.type=="TIME_SIGNATURE" and event.time>=barStartTime:
+				timesig = midi_event_to_time_signature(event)
+				event.type = "USED_TIME_SIGNATURE"
+	
+		if timesig==None:
+			if currentTimeSignature==None:
+				timesig = TimeSignature("4/4")
+			else:
+				timesig = currentTimeSignature
+
+		return timesig	
+
+	def get_tempo(timeList,barStartTime, barEndTime, currentTempo = None):
+		
+		tempo = None
+		i=0
+		# get first event:
+		while(i<len(timeList)):
+			# run through list until we find the most recent time signature
+			# before the end of the current bar
+			event = timeList[i]
+			i = i + 1
+			if event.time>=barEndTime:
+				break
+
+			# run through list until we find the most recent tempo
+			# before the end of the current bar
+			if event.type=="SET_TEMPO" and event.time>=barStartTime:
+				tempo = midi_event_to_qpm_tempo(event)
+				event.type = "USED_TEMPO"
+
+		if tempo==None:
+			if currentTempo==None:
+				tempo = 120
+			else:
+				tempo = currentTempo
+
+		return tempo
+
+
+
+	# get initial time sig and tempo or use defaults
+	timeList = get_time_events(midiFile)
+
+	# get notes from the midi file (absolute start times from start of file)
+	notesList = get_notes_from_event_list(get_note_events(midiFile))
+	
+
+	# get initial tempo and time signature from time list
+	timesig = get_initial_time_signature(timeList)
+	tempo = get_initial_tempo(timeList)
+
+
+
+
+	# ticks per quarter note:
+	ticksPerQuarter = midiFile.ticksPerQuarterNote
+	#calculate the initial length of a bar in ticks
+	barlength = calculate_bar_ticks(timesig.get_numerator(), timesig.get_denominator(), ticksPerQuarter)
+	# initialise time for start and end of current bar
+	barStartTime = 0
+	barEndTime = barlength
+	
+
+	# initialise bars list
+	bars = BarList()
+	noteIndex = 0
+
+	# guard against an empty notes list (e.g. a MIDI track with no note events)
+	if len(notesList)==0:
+		return bars
+
+	note = notesList[0]
+	# run through the notes list, chopping it into bars
+	while noteIndex<len(notesList):
+		#create a local note sequence to build a bar
+		currentNotes = NoteSequence()
+
+		timesig = get_time_signature(timeList,barStartTime, barEndTime, timesig)
+		tempo = get_tempo(timeList,barStartTime, barEndTime, tempo)
+		
+
+		#find all the notes in the current bar
+		while(note.startTime<barEndTime):
+			#make note start time relative to current bar
+			note.startTime = note.startTime - barStartTime
+			#add note to current bar note sequence
+			currentNotes.append(note)
+			noteIndex = noteIndex + 1
+			if noteIndex<len(notesList):
+				note = notesList[noteIndex]
+			else:
+				break
+
+		# create a new bar from the current notes and add it to the list of bars
+		bars.append(Bar(currentNotes, timesig, ticksPerQuarter, tempo))
+
+		barStartTime = barEndTime
+
+		barEndTime = barEndTime + barlength
+
+	return bars
+
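+# Illustrative usage sketch (not part of the original code); "example.mid" is a
+# placeholder filename:
+#
+#	midiFile = read_midi_file("example.mid")
+#	bars = get_bars_from_midi(midiFile)
+#	for bar in bars:
+#		print bar.to_string()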
+		
+#get note objects from a list of note midi events
+def get_notes_from_event_list(noteEventList):
+	noteslist = NoteSequence()
+
+	index = 0
+	
+	#while not at the end of the list of note events
+	while index<len(noteEventList):
+		#get next event from list
+		event = noteEventList[index]
+		index = index + 1
+		#if we've found the start of a note, search for the corresponding end event
+		if event.type=="NOTE_ON" and event.velocity!=0:
+			localindex = index
+			
+			#find corresponding end event
+			while localindex<len(noteEventList):
+				endEvent = noteEventList[localindex]
+				#if its the same note and it's an end event
+				if endEvent.pitch==event.pitch and (endEvent.type=="NOTE_OFF" or (endEvent.type=="NOTE_ON" and endEvent.velocity==0)):
+					#make a note
+					note = Note(event.time,endEvent.time-event.time,event.velocity)
+					#alter the type of this end event so it can't be linked to another note on
+					endEvent.type = "DUMMY"
+					#add the note to the list
+					noteslist.append(note)
+					#found the end of the note so break out of the local loop
+					break
+
+	return noteslist
+
+
+
+
+
+
+
+
+
+
+def get_note_events(midiFile, trackNumber = None):
+	"""
+	Gets all note on and note off events from a midifile.
+
+	If trackNumber is not specified, the function will check the file format
+	and pick either track 0 for a type 0 (single track format) or track 1 
+	for a type 1 or 2 (multi-track) midi file.
+
+	"""
+
+	if trackNumber==None:
+		if midiFile.format==0:
+			trackNumber=0
+		else:
+			trackNumber=1
+
+	return get_events_of_type(midiFile, trackNumber, lambda e: (e.type == 'NOTE_ON') | (e.type == "NOTE_OFF") )
+	
+
+
+def get_time_events(midiFile):
+	"""
+	Gets time signature and tempo events from a MIDI file (MIDI format 0 
+	or format 1) and returns a list of those events and their associated 
+	absolute start times.  If no time signature or tempo are specified then
+	defaults of 4/4 and 120QPM are assumed.
+
+	From the MIDI file specification:
+
+	"All MIDI Files should specify tempo and time signature. If they don't, 
+	the time signature is assumed to be 4/4, and the tempo 120 beats per 
+	minute. In format 0, these meta-events should occur at least at the 
+	beginning of the single multi-channel track. In format 1, these meta-events
+	should be contained in the first track. In format 2, each of the temporally
+	independent patterns should contain at least initial time signature and 
+	tempo information."
+
+	"""
+	return get_events_of_type(midiFile, 0, lambda e: (e.type == 'SET_TEMPO') | (e.type == "TIME_SIGNATURE") )
+	
+
+def get_events_of_type(midiFile, trackIndex, lambdaEventType):
+	"""
+	Filters the events in a midi track that are selected by the 
+	function object lambdaEventType e.g. lambda e: (e.type == 'NOTE_ON')
+	Returns a list containing the relevant events.
+	"""
+	
+	track = midiFile.tracks[trackIndex] 
+	eventIndex = 0
+	#	numTimeEvents = 0
+
+	localEventList  = []
+	localEventFound = True
+	#accumulatedTime = 0
+
+	while localEventFound==True:
+		#find the next time event from the track:
+		(localEventIndex, localEventDelta, localEventFound) = find_event(track, eventIndex, lambdaEventType)
+
+		if localEventFound==True:
+			#get the time event object out of the track
+			localEvent = track.events[localEventIndex]
+
+			#update the start event to search from
+			eventIndex = localEventIndex + 1
+
+			#calculate the absolute start time of the time event
+			#accumulatedTime = accumulatedTime + localEventDelta
+
+			localEventList.append(localEvent)
+
+	return localEventList
+
+
+def midi_event_to_time_signature(midiTimeSignatureEvent):
+	"""
+	Extract the numerator and denominator from a midi time signature
+	event and return a TimeSignature music object.  Ignore clocks per
+	quarternote and  32nds per quarternote elements since these are
+	only for sequencer metronome settings which we won't use here.
+	"""
+	if midiTimeSignatureEvent.type!="TIME_SIGNATURE":
+		print "Error in midi_event_to_time_signature(): event must be a midi time signature event"
+		return None
+	else:
+		num = ord(midiTimeSignatureEvent.data[0])
+		denom = 2**ord(midiTimeSignatureEvent.data[1])
+		return TimeSignature("%d/%d" % (num, denom))
+	
+
+def midi_event_to_qpm_tempo(midiTempoEvent):
+	"""
+	Extract the tempo in QPM from a midi SET_TEMPO event
+	"""
+	if midiTempoEvent.type!="SET_TEMPO":
+		print "Error in midi_event_to_qpm_tempo(): event must be a midi SET_TEMPO event"
+		return None
+	else:
+		# tempo is stored as microseconds per quarter note
+		# in three bytes which we can extract as three ints:
+		values = map(ord, midiTempoEvent.data)
+		# first byte multiplied by 2^16, second 2^8 and third is normal units
+		# giving microseconds per quarter
+		microsecondsPerQuarter = values[0]*2**16 + values[1]*2**8 + values[2]
+
+		# to calculate QPM, 1 minute = 60million microseconds
+		# so divide 60million by micros per quarter:
+		return 60000000/microsecondsPerQuarter
+
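+# Worked example: a SET_TEMPO payload of the bytes 0x07 0xA1 0x20 gives
+# 7*2**16 + 161*2**8 + 32 = 500000 microseconds per quarter note,
+# so the returned tempo is 60000000/500000 = 120 QPM.
+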
+def find_event(track, eventStartIndex, lambdaExpr):
+	'''
+	From code by Csaba Sulyok:
+	Finds MIDI event based on lambda expression, starting from a given index.
+	Returns a tuple of the following 3 elements:
+	1. event index where the lambda expression is true
+	2. aggregate delta time from event start index until the found event
+	3. flag whether or not any value was found, or we've reached the end of the event queue
+	'''
+
+	eventIndex = eventStartIndex
+	deltaTime = 0
+	while eventIndex < len(track.events) and not lambdaExpr(track.events[eventIndex]):
+		if track.events[eventIndex].type == 'DeltaTime':
+			deltaTime += track.events[eventIndex].time
+		eventIndex += 1
+
+	success = eventIndex < len(track.events)
+	return (eventIndex, deltaTime, success)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/rhythm_parser.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,146 @@
+'''
+Authors: Chunyang Song, Christopher Harte
+Institution: Centre for Digital Music, Queen Mary University of London
+'''
+
+# Parse the rhythm file and return a list of Bar objects
+#Piece = []
+
+from parameter_setter import timeSignatureBase
+from music_objects import *
+
+comment_sign = '#'
+
+def discard_comments(line):
+	if comment_sign in line:
+		line = line[0:line.find(comment_sign)]
+	return line
+
+def discard_spaces(line):
+	line = line.replace(" ", '').replace("\t", '')
+	return line
+
+def discard_linereturns(line):
+	line = line.replace("\n","").replace("\r","")
+	return line
+	
+
+# def extractInfo(line):
+# 	try:
+# 		if '{' not in line and '}' not in line:
+# 			raise RhythmSyntaxError(line)
+# 		else:
+# 			return line[line.find('{')+1 : line.find('}')]
+# 	except RhythmSyntaxError:
+# 		print 'Rhythmic information needs to be enclosed by "{" and "}"'
+
+
+def read_rhythm(fileName):
+	fileContent = file(fileName)
+
+	barList = BarList()
+
+	tempo=None
+	timeSignature=None
+	ticksPerQuarter=None
+
+	# for each line in the file, parse the line and add any 
+	# new bars to the main bar list for the piece
+	for line in fileContent:
+		
+		# ignore the line if it's only a comment
+		if is_comment(line) or line=="\n":
+			continue
+
+		# if time signature has not yet been set then it should be the first 
+		# thing we find in a file after the comments at the top
+		if timeSignature==None:
+			(field, line) = get_next_field(line)
+			# if there is a valid field, it should be a time signature
+			if field!=None:
+				[fieldname,value] = field
+				if fieldname.lower()=="t":
+					timeSignature = TimeSignature(value)
+				else:
+					print 'Error, first field in the file should set the time signature.'
+
+		# parse the line
+		(newbarlist, tempo, timeSignature, ticksPerQuarter) = parse_line(line, timeSignature,  tempo, ticksPerQuarter)
+		
+		# if we found some bars in this line then add them to the overall bar list
+		if len(newbarlist)>0:
+			barList.concat(newbarlist)
+
+	return barList
+
+def is_comment(line):
+	stripped = discard_spaces(line)
+	# guard against empty lines before checking the first character
+	return len(stripped)>0 and stripped[0]==comment_sign
+
+def parse_line(line,  timeSignature=None, tempo=None, ticksPerQuarter=None):
+	
+	#strip the line of line returns, spaces and comments
+	line = discard_linereturns(discard_spaces(discard_comments(line)))
+	
+	bars = BarList()
+
+	#work through each field in the line
+	while len(line)>0:
+		(field, line) = get_next_field(line)
+
+		if field!=None:
+			
+			[fieldname, value] = field
+			
+			if fieldname.lower() == "v":
+				#velocity sequence
+				bar = Bar(VelocitySequence(value),timeSignature, ticksPerQuarter, tempo)	
+				bars.append(bar)
+
+			elif fieldname.lower() == "y":
+				#note sequence	
+				bar = Bar(NoteSequence(value), timeSignature, ticksPerQuarter, tempo)	
+				bars.append(bar)
+
+			elif fieldname.lower() == "t":
+				#time signature
+				timeSignature = TimeSignature(value)
+			
+			elif fieldname.lower() == "tpq":
+				#ticks per quarter
+				ticksPerQuarter = int(value)
+			
+			elif fieldname.lower() == "qpm":
+				#tempo
+				tempo = int(value)
+			
+			else:
+				print 'Unrecognised field type: "' + fieldname + '"'
+	
+	return bars, tempo, timeSignature, ticksPerQuarter
+
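+# Illustrative usage sketch (not part of the original code): a .rhy line combines
+# fields such as t{} (time signature), tpq{} (ticks per quarter), qpm{} (tempo) and
+# v{}/y{} (one bar per rhythm field).  Assuming "4/4" is a registered time signature:
+#
+#	(bars, tempo, timesig, tpq) = parse_line("t{4/4}v{1,0,1,0}v{1,1,0,1}")
+#
+# would return two Bar objects in 4/4, with tempo and ticks-per-quarter left as None.
+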
+class RhythmSyntaxError(Exception):
+	def __init__(self, value):
+		self.value = value
+	def __str__(self):
+		return repr(self.value)
+
+def get_next_field(line):
+	index = line.find("}")
+	field = None
+	if index>=0:
+		fieldtext = line[:index]
+		line = line[index+1:]
+		field = fieldtext.split("{")
+	else:
+		print 'Error, incorrect syntax: "'+line+'"'
+		raise RhythmSyntaxError(line)
+
+	return field,line
+
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Syncopation models/synpy/syncopation.py	Thu Apr 23 23:52:04 2015 +0100
@@ -0,0 +1,112 @@
+'''
+Author: Chunyang Song
+Institution: Centre for Digital Music, Queen Mary University of London
+
+'''
+from rhythm_parser import *
+from music_objects import *
+
+
+def sync_perbar_permodel (model, bar, parameters=None):
+	return model.get_syncopation(bar, parameters)
+
+def calculate_syncopation(model, source, parameters=None, outfile=None):
+	total = 0.0
+	average = 0.0
+	barResults = []
+	numberOfNotes = 0
+
+	barlist = None
+	sourceType = None
+
+	if isinstance(source, BarList):
+		barlist = source
+		sourceType = "bar list"
+	elif isinstance(source, Bar):
+		# BarList.append() returns None, so build the list first and then append
+		barlist = BarList()
+		barlist.append(source)
+		sourceType = "single bar"
+	elif isinstance(source, basestring):
+		#treat source as a filename
+		sourceType = source
+		if source[-4:]==".mid":
+			import readmidi
+			midiFile = readmidi.read_midi_file(source)
+			barlist = readmidi.get_bars_from_midi(midiFile)
+
+		elif source[-4:]==".rhy":
+			#import rhythm_parser 
+			barlist = read_rhythm(source)
+		else:
+			print "Error in calculate_syncopation(): unrecognised file type."
+	else:
+		print "Error in calculate_syncopation(): unrecognised source type."
+	
+	barsDiscarded=0
+	discardedlist = []
+	includedlist = []
+
+	if barlist!=None:
+		for bar in barlist:
+			if not bar.is_empty():
+				barSyncopation = sync_perbar_permodel(model, bar, parameters)
+			else:
+				barSyncopation = None
+				print 'Bar %d cannot be measured because it is empty, returning None.' % barlist.index(bar)
+			
+			barResults.append(barSyncopation)
+			if barSyncopation != None:
+				total += barSyncopation
+				numberOfNotes += sum(bar.get_binary_sequence())
+				includedlist.append(barlist.index(bar))
+			else:
+				barsDiscarded += 1
+				discardedlist.append(barlist.index(bar))
+				print 'Model could not measure bar %d, returning None.' % barlist.index(bar)
+
+		import WNBD
+		if model is WNBD and numberOfNotes > 0:
+			total = total / numberOfNotes
+
+		# guard against division by zero when every bar was discarded
+		if len(barResults) > barsDiscarded:
+			average = total / (len(barResults) - barsDiscarded)
+
+	output = {
+			"model_name":model.__name__ ,
+			"summed_syncopation":total,
+			"mean_syncopation_per_bar":average,
+			"source":sourceType,
+			"number_of_bars":len(barResults),
+			"number_of_bars_not_measured":barsDiscarded,
+			"bars_with_valid_output":includedlist,
+			"syncopation_by_bar":barResults
+			}
+
+	if outfile!=None:
+
+		if ".xml" in outfile:
+			results_to_xml(output,outfile)
+		elif ".json" in outfile:
+			results_to_json(output,outfile)
+		else:
+			print "Error in syncopation.py: Unrecognised output file type: ", outfile
+
+	return output
+
+
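+# Illustrative usage sketch (not part of the original code), assuming the LHL model
+# module from this package and a placeholder file name "example.rhy":
+#
+#	import LHL
+#	results = calculate_syncopation(LHL, "example.rhy", outfile="results.json")
+#	print results["mean_syncopation_per_bar"]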
+
+def results_to_xml(results, outputFilename):
+	from xml.etree.ElementTree import Element, ElementTree
+
+	elem = Element("syncopation_results")
+
+	for key, val in results.items():
+		child = Element(key)
+		child.text = str(val)
+		elem.append(child)
+
+	ElementTree(elem).write(outputFilename)
+
+def results_to_json(results, outputFilename):
+	import json
+
+	fileHandle = open(outputFilename, 'w')
+	json.dump(results, fileHandle, sort_keys=True, indent=4, separators=(',', ': '))
+	fileHandle.flush()
+	fileHandle.close()
+