changeset 8:0f8d8ff0ece7
fixed a bug in calculating end times
author   | Emmanouil Theofanis Chourdakis <e.t.chourdakis@qmul.ac.uk>
date     | Tue, 03 Oct 2017 13:56:15 +0100
parents  | f90eba90a78f
children | 53ee437b5ba3
files    | README.md simscene.py
diffstat | 2 files changed, 39 insertions(+), 9 deletions(-)
--- a/README.md	Tue Oct 03 12:14:55 2017 +0100
+++ b/README.md	Tue Oct 03 13:56:15 2017 +0100
@@ -1,3 +1,8 @@
+SimScene.py
+
+An acoustic pattern generation tool
+Copyright (c) Emmanouil Theofanis Chourdakis <e.t.chourdakis@qmul.ac.uk>
+
 SimScene.py is a collection of tools to synthesize an audio scene from independent sound events.
 It is inspired by Lagrange et al. simScene (https://bitbucket.org/mlagrange/simscene) but takes a different direction.
 The goal of simScene.py is to (1) have a python implementation of such a
@@ -8,4 +13,15 @@
 This code is licensed under GPLv3. Please see LICENSE for more info.
 
+Usage examples:
+
+ * ./simscene.py -h
+   Displays command line syntax and option descriptions.
+
+ * ./simscene.py ../simscene-input/sound ../simscene-output 15 -e ../simscene-input/forest_events.txt -vv
+   Reads sounds from `../simscene-input/sound', and creates a 15 second scene of events.
+   Event options are given in ../simscene-input/forest_events.txt. Finally, generates and displays
+   plots (waveforms, spectrograms and piano rolls). Saves everything to ../simscene-output.
+
+
--- a/simscene.py	Tue Oct 03 12:14:55 2017 +0100
+++ b/simscene.py	Tue Oct 03 13:56:15 2017 +0100
@@ -28,6 +28,7 @@
 # Matplotlib
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
+from cycler import cycler
 
 # Tabulate
 from tabulate import tabulate
@@ -211,7 +212,7 @@
         # Store the successive starting and ending times of the events (given e.g. the model)
         # in the following lists.
         start_times = [events_df['start_time'].loc[n]]
-        end_times = [events_df['end_time'].loc[n]]
+        end_times = [start_times[-1]+len(wav)/float(SR)]
 
         # Start with the first time in the list
         new_start_time = start_times[-1]
@@ -220,7 +221,8 @@
         # Until the scene is full
         while new_start_time < track_end_time:
             offset = float(mean_time_between_instances) +\
-                float(events_df['time_between_instances_stddev'].loc[n]*np.random.randn())
+                float(events_df['time_between_instances_stddev'].loc[n]*np.random.randn())
+            logging.debug(offset)
 
             new_start_time += offset
             new_end_time += offset
@@ -282,19 +284,31 @@
         librosa.display.specshow(Fdb, sr=SR, x_axis='time', y_axis='hz')
         ax = plt.subplot(3,1,3)
         ax.set_xlim([0,scene_duration])
+        ax.set_ylim([-0.5, len(scene_starting_times)-0.5])
+
+        # Get labels
+        labels = [s[0] for s in scene_starting_times]
+
+        plt.yticks(range(len(scene_starting_times)), labels)
+
+        plt.rc('lines', linewidth=4)
+        plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y']) +
+                                   cycler('linestyle', ['-', '--', ':', '-.'])))
         for n in range(len(scene_starting_times)):
             label = scene_starting_times[n][0]
             start_times = scene_starting_times[n][1]
             end_times = scene_ending_times[n][1]
             for m in range(len(start_times)):
-                plt.text(
-                    start_times[m],
-                    0.1+n/float(len(scene_starting_times)),
-                    label,
-                    size=9,ha='center',va='center',
-                    bbox=dict(boxstyle='square', ec=(1., 0.5, 0.5), fc=(1., 1-n/float(len(scene_starting_times)), n/float(len(scene_starting_times)))),
-                    )
+                plt.axhline(float(n), start_times[m], start_times[m]+1.0)
+            # for m in range(len(start_times)):
+            #     plt.text(
+            #         start_times[m],
+            #         n,
+            #         label,
+            #         size=9,ha='center',va='center',
+            #         bbox=dict(boxstyle='square', ec=(1., 0.5, 0.5), fc=(1., 1-n/float(len(scene_starting_times)), n/float(len(scene_starting_times)))),
+            #         )
 
         plt.savefig('{}/full-scene.{}'.format(output_path, image_format))
         if figure_verbosity > 1:
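
Note on the end-time fix: the patched line derives each event's end time from its start time plus the duration of the loaded clip (len(wav)/SR), instead of reading end_time from the event table. A minimal sketch of that calculation, using the variable names from the diff but made-up sample values:

    import numpy as np

    SR = 44100                      # sample rate used when loading the clip
    wav = np.zeros(3 * SR)          # stand-in for a loaded 3-second event clip

    start_times = [1.5]             # first scheduled start time, in seconds
    # End time = start time + clip duration in seconds, as in the patched line.
    end_times = [start_times[-1] + len(wav) / float(SR)]

    print(end_times)                # [4.5] -> the event ends 3 s after it starts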
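Note on the plotting hunk: cycler is imported so that line colour and linestyle advance together through matplotlib's axes prop_cycle when the piano-roll lanes are drawn. A small self-contained illustration of that prop_cycle mechanism only (the data below is invented and just shows successive plot calls consuming the combined cycle):

    import numpy as np
    import matplotlib.pyplot as plt
    from cycler import cycler

    # Combine two cycles so each new line gets the next (colour, linestyle) pair,
    # mirroring the rc settings added in the patch.
    plt.rc('lines', linewidth=4)
    plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y']) +
                               cycler('linestyle', ['-', '--', ':', '-.'])))

    x = np.linspace(0, 1, 100)
    for n in range(4):
        plt.plot(x, x + n)   # each call picks up the next colour/linestyle pair

    plt.show()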